blob: 48ff8c22ba1ffebba8859a3c078f53a700ea733c [file] [log] [blame]
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001/*
Sujithcee075a2009-03-13 09:07:23 +05302 * Copyright (c) 2008-2009 Atheros Communications Inc.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
Sujith394cf0a2009-02-09 13:26:54 +053017#include "ath9k.h"
Luis R. Rodriguezb622a722010-04-15 17:39:28 -040018#include "ar9003_mac.h"
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070019
20#define BITS_PER_BYTE 8
21#define OFDM_PLCP_BITS 22
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070022#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
23#define L_STF 8
24#define L_LTF 8
25#define L_SIG 4
26#define HT_SIG 8
27#define HT_STF 4
28#define HT_LTF(_ns) (4 * (_ns))
29#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
30#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
31#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
32#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
33
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070034
Felix Fietkauc6663872010-04-19 19:57:33 +020035static u16 bits_per_symbol[][2] = {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070036 /* 20MHz 40MHz */
37 { 26, 54 }, /* 0: BPSK */
38 { 52, 108 }, /* 1: QPSK 1/2 */
39 { 78, 162 }, /* 2: QPSK 3/4 */
40 { 104, 216 }, /* 3: 16-QAM 1/2 */
41 { 156, 324 }, /* 4: 16-QAM 3/4 */
42 { 208, 432 }, /* 5: 64-QAM 2/3 */
43 { 234, 486 }, /* 6: 64-QAM 3/4 */
44 { 260, 540 }, /* 7: 64-QAM 5/6 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070045};
46
47#define IS_HT_RATE(_rate) ((_rate) & 0x80)
48
Felix Fietkau82b873a2010-11-11 03:18:37 +010049static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
50 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +010051 struct list_head *bf_head);
Sujithe8324352009-01-16 21:38:42 +053052static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -070053 struct ath_txq *txq, struct list_head *bf_q,
54 struct ath_tx_status *ts, int txok, int sendbar);
Sujithe8324352009-01-16 21:38:42 +053055static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
56 struct list_head *head);
Felix Fietkau269c44b2010-11-14 15:20:06 +010057static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +010058static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
59 struct ath_tx_status *ts, int nframes, int nbad,
60 int txok, bool update_rc);
Felix Fietkau90fa5392010-09-20 13:45:38 +020061static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
62 int seqno);
Sujithe8324352009-01-16 21:38:42 +053063
Felix Fietkau545750d2009-11-23 22:21:01 +010064enum {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020065 MCS_HT20,
66 MCS_HT20_SGI,
Felix Fietkau545750d2009-11-23 22:21:01 +010067 MCS_HT40,
68 MCS_HT40_SGI,
69};
70
Felix Fietkau0e668cd2010-04-19 19:57:32 +020071static int ath_max_4ms_framelen[4][32] = {
72 [MCS_HT20] = {
73 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
74 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
75 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
76 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
77 },
78 [MCS_HT20_SGI] = {
79 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
80 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
81 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
82 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010083 },
84 [MCS_HT40] = {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020085 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
86 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
87 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
88 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010089 },
90 [MCS_HT40_SGI] = {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020091 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
92 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
93 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
94 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010095 }
96};
97
Sujithe8324352009-01-16 21:38:42 +053098/*********************/
99/* Aggregation logic */
100/*********************/
101
Sujithe8324352009-01-16 21:38:42 +0530102static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
103{
104 struct ath_atx_ac *ac = tid->ac;
105
106 if (tid->paused)
107 return;
108
109 if (tid->sched)
110 return;
111
112 tid->sched = true;
113 list_add_tail(&tid->list, &ac->tid_q);
114
115 if (ac->sched)
116 return;
117
118 ac->sched = true;
119 list_add_tail(&ac->list, &txq->axq_acq);
120}
121
Sujithe8324352009-01-16 21:38:42 +0530122static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
123{
Felix Fietkau066dae92010-11-07 14:59:39 +0100124 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530125
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200126 WARN_ON(!tid->paused);
127
Sujithe8324352009-01-16 21:38:42 +0530128 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200129 tid->paused = false;
Sujithe8324352009-01-16 21:38:42 +0530130
131 if (list_empty(&tid->buf_q))
132 goto unlock;
133
134 ath_tx_queue_tid(txq, tid);
135 ath_txq_schedule(sc, txq);
136unlock:
137 spin_unlock_bh(&txq->axq_lock);
138}
139
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100140static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
Felix Fietkau76e45222010-11-14 15:20:08 +0100141{
142 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100143 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
144 sizeof(tx_info->rate_driver_data));
145 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
Felix Fietkau76e45222010-11-14 15:20:08 +0100146}
147
Sujithe8324352009-01-16 21:38:42 +0530148static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
149{
Felix Fietkau066dae92010-11-07 14:59:39 +0100150 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530151 struct ath_buf *bf;
152 struct list_head bf_head;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200153 struct ath_tx_status ts;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100154 struct ath_frame_info *fi;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200155
Sujithe8324352009-01-16 21:38:42 +0530156 INIT_LIST_HEAD(&bf_head);
157
Felix Fietkau90fa5392010-09-20 13:45:38 +0200158 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +0530159 spin_lock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530160
161 while (!list_empty(&tid->buf_q)) {
162 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +0530163 list_move_tail(&bf->list, &bf_head);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200164
Felix Fietkaue1566d12010-11-20 03:08:46 +0100165 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100166 fi = get_frame_info(bf->bf_mpdu);
167 if (fi->retries) {
168 ath_tx_update_baw(sc, tid, fi->seqno);
Felix Fietkau7d2c16b2011-03-12 01:11:28 +0100169 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200170 } else {
Felix Fietkaua9e99a02011-01-10 17:05:47 -0700171 ath_tx_send_normal(sc, txq, NULL, &bf_head);
Felix Fietkau90fa5392010-09-20 13:45:38 +0200172 }
Felix Fietkaue1566d12010-11-20 03:08:46 +0100173 spin_lock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530174 }
175
176 spin_unlock_bh(&txq->axq_lock);
177}
178
179static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
180 int seqno)
181{
182 int index, cindex;
183
184 index = ATH_BA_INDEX(tid->seq_start, seqno);
185 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
186
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200187 __clear_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530188
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200189 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
Sujithe8324352009-01-16 21:38:42 +0530190 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
191 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
192 }
193}
194
195static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100196 u16 seqno)
Sujithe8324352009-01-16 21:38:42 +0530197{
198 int index, cindex;
199
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100200 index = ATH_BA_INDEX(tid->seq_start, seqno);
Sujithe8324352009-01-16 21:38:42 +0530201 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200202 __set_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530203
204 if (index >= ((tid->baw_tail - tid->baw_head) &
205 (ATH_TID_MAX_BUFS - 1))) {
206 tid->baw_tail = cindex;
207 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
208 }
209}
210
211/*
212 * TODO: For frame(s) that are in the retry state, we will reuse the
213 * sequence number(s) without setting the retry bit. The
214 * alternative is to give up on these and BAR the receiver's window
215 * forward.
216 */
217static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
218 struct ath_atx_tid *tid)
219
220{
221 struct ath_buf *bf;
222 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700223 struct ath_tx_status ts;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100224 struct ath_frame_info *fi;
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700225
226 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +0530227 INIT_LIST_HEAD(&bf_head);
228
229 for (;;) {
230 if (list_empty(&tid->buf_q))
231 break;
Sujithe8324352009-01-16 21:38:42 +0530232
Sujithd43f30152009-01-16 21:38:53 +0530233 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
234 list_move_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +0530235
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100236 fi = get_frame_info(bf->bf_mpdu);
237 if (fi->retries)
238 ath_tx_update_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +0530239
240 spin_unlock(&txq->axq_lock);
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700241 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +0530242 spin_lock(&txq->axq_lock);
243 }
244
245 tid->seq_next = tid->seq_start;
246 tid->baw_tail = tid->baw_head;
247}
248
Sujithfec247c2009-07-27 12:08:16 +0530249static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100250 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +0530251{
Felix Fietkau8b7f8532010-11-28 19:37:48 +0100252 struct ath_frame_info *fi = get_frame_info(skb);
Sujithe8324352009-01-16 21:38:42 +0530253 struct ieee80211_hdr *hdr;
254
Sujithfec247c2009-07-27 12:08:16 +0530255 TX_STAT_INC(txq->axq_qnum, a_retries);
Felix Fietkau8b7f8532010-11-28 19:37:48 +0100256 if (fi->retries++ > 0)
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100257 return;
Sujithe8324352009-01-16 21:38:42 +0530258
Sujithe8324352009-01-16 21:38:42 +0530259 hdr = (struct ieee80211_hdr *)skb->data;
260 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
261}
262
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200263static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
264{
265 struct ath_buf *bf = NULL;
266
267 spin_lock_bh(&sc->tx.txbuflock);
268
269 if (unlikely(list_empty(&sc->tx.txbuf))) {
270 spin_unlock_bh(&sc->tx.txbuflock);
271 return NULL;
272 }
273
274 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
275 list_del(&bf->list);
276
277 spin_unlock_bh(&sc->tx.txbuflock);
278
279 return bf;
280}
281
282static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
283{
284 spin_lock_bh(&sc->tx.txbuflock);
285 list_add_tail(&bf->list, &sc->tx.txbuf);
286 spin_unlock_bh(&sc->tx.txbuflock);
287}
288
Sujithd43f30152009-01-16 21:38:53 +0530289static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
290{
291 struct ath_buf *tbf;
292
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200293 tbf = ath_tx_get_buffer(sc);
294 if (WARN_ON(!tbf))
Vasanthakumar Thiagarajan8a460972009-06-10 17:50:09 +0530295 return NULL;
Sujithd43f30152009-01-16 21:38:53 +0530296
297 ATH_TXBUF_RESET(tbf);
298
299 tbf->bf_mpdu = bf->bf_mpdu;
300 tbf->bf_buf_addr = bf->bf_buf_addr;
Vasanthakumar Thiagarajand826c832010-04-15 17:38:45 -0400301 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
Sujithd43f30152009-01-16 21:38:53 +0530302 tbf->bf_state = bf->bf_state;
Sujithd43f30152009-01-16 21:38:53 +0530303
304 return tbf;
305}
306
Felix Fietkaub572d032010-11-14 15:20:07 +0100307static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
308 struct ath_tx_status *ts, int txok,
309 int *nframes, int *nbad)
310{
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100311 struct ath_frame_info *fi;
Felix Fietkaub572d032010-11-14 15:20:07 +0100312 u16 seq_st = 0;
313 u32 ba[WME_BA_BMP_SIZE >> 5];
314 int ba_index;
315 int isaggr = 0;
316
317 *nbad = 0;
318 *nframes = 0;
319
Felix Fietkaub572d032010-11-14 15:20:07 +0100320 isaggr = bf_isaggr(bf);
321 if (isaggr) {
322 seq_st = ts->ts_seqnum;
323 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
324 }
325
326 while (bf) {
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100327 fi = get_frame_info(bf->bf_mpdu);
328 ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
Felix Fietkaub572d032010-11-14 15:20:07 +0100329
330 (*nframes)++;
331 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
332 (*nbad)++;
333
334 bf = bf->bf_next;
335 }
336}
337
338
Sujithd43f30152009-01-16 21:38:53 +0530339static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
340 struct ath_buf *bf, struct list_head *bf_q,
Felix Fietkauc5992612010-11-14 15:20:09 +0100341 struct ath_tx_status *ts, int txok, bool retry)
Sujithe8324352009-01-16 21:38:42 +0530342{
343 struct ath_node *an = NULL;
344 struct sk_buff *skb;
Sujith1286ec62009-01-27 13:30:37 +0530345 struct ieee80211_sta *sta;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100346 struct ieee80211_hw *hw = sc->hw;
Sujith1286ec62009-01-27 13:30:37 +0530347 struct ieee80211_hdr *hdr;
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800348 struct ieee80211_tx_info *tx_info;
Sujithe8324352009-01-16 21:38:42 +0530349 struct ath_atx_tid *tid = NULL;
Sujithd43f30152009-01-16 21:38:53 +0530350 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +0530351 struct list_head bf_head, bf_pending;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530352 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
Sujithe8324352009-01-16 21:38:42 +0530353 u32 ba[WME_BA_BMP_SIZE >> 5];
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530354 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
355 bool rc_update = true;
Felix Fietkau78c46532010-06-25 01:26:16 +0200356 struct ieee80211_tx_rate rates[4];
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100357 struct ath_frame_info *fi;
Björn Smedmanebd02282010-10-10 22:44:39 +0200358 int nframes;
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100359 u8 tidno;
Felix Fietkau55195412011-04-17 23:28:09 +0200360 bool clear_filter;
Sujithe8324352009-01-16 21:38:42 +0530361
Sujitha22be222009-03-30 15:28:36 +0530362 skb = bf->bf_mpdu;
Sujith1286ec62009-01-27 13:30:37 +0530363 hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +0530364
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800365 tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800366
Felix Fietkau78c46532010-06-25 01:26:16 +0200367 memcpy(rates, tx_info->control.rates, sizeof(rates));
368
Sujith1286ec62009-01-27 13:30:37 +0530369 rcu_read_lock();
370
Ben Greear686b9cb2010-09-23 09:44:36 -0700371 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
Sujith1286ec62009-01-27 13:30:37 +0530372 if (!sta) {
373 rcu_read_unlock();
Felix Fietkau73e19462010-07-07 19:42:09 +0200374
Felix Fietkau31e79a52010-07-12 23:16:34 +0200375 INIT_LIST_HEAD(&bf_head);
376 while (bf) {
377 bf_next = bf->bf_next;
378
379 bf->bf_state.bf_type |= BUF_XRETRY;
380 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
381 !bf->bf_stale || bf_next != NULL)
382 list_move_tail(&bf->list, &bf_head);
383
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100384 ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
Felix Fietkau31e79a52010-07-12 23:16:34 +0200385 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
386 0, 0);
387
388 bf = bf_next;
389 }
Sujith1286ec62009-01-27 13:30:37 +0530390 return;
Sujithe8324352009-01-16 21:38:42 +0530391 }
392
Sujith1286ec62009-01-27 13:30:37 +0530393 an = (struct ath_node *)sta->drv_priv;
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100394 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
395 tid = ATH_AN_2_TID(an, tidno);
Sujith1286ec62009-01-27 13:30:37 +0530396
Felix Fietkaub11b1602010-07-11 12:48:44 +0200397 /*
398 * The hardware occasionally sends a tx status for the wrong TID.
399 * In this case, the BA status cannot be considered valid and all
400 * subframes need to be retransmitted
401 */
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100402 if (tidno != ts->tid)
Felix Fietkaub11b1602010-07-11 12:48:44 +0200403 txok = false;
404
Sujithe8324352009-01-16 21:38:42 +0530405 isaggr = bf_isaggr(bf);
Sujithd43f30152009-01-16 21:38:53 +0530406 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
Sujithe8324352009-01-16 21:38:42 +0530407
Sujithd43f30152009-01-16 21:38:53 +0530408 if (isaggr && txok) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700409 if (ts->ts_flags & ATH9K_TX_BA) {
410 seq_st = ts->ts_seqnum;
411 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Sujithe8324352009-01-16 21:38:42 +0530412 } else {
Sujithd43f30152009-01-16 21:38:53 +0530413 /*
414 * AR5416 can become deaf/mute when BA
415 * issue happens. Chip needs to be reset.
416 * But AP code may have sychronization issues
417 * when perform internal reset in this routine.
418 * Only enable reset in STA mode for now.
419 */
Sujith2660b812009-02-09 13:27:26 +0530420 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
Sujithd43f30152009-01-16 21:38:53 +0530421 needreset = 1;
Sujithe8324352009-01-16 21:38:42 +0530422 }
423 }
424
425 INIT_LIST_HEAD(&bf_pending);
426 INIT_LIST_HEAD(&bf_head);
427
Felix Fietkaub572d032010-11-14 15:20:07 +0100428 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
Sujithe8324352009-01-16 21:38:42 +0530429 while (bf) {
Felix Fietkauf0b82202011-01-15 14:30:15 +0100430 txfail = txpending = sendbar = 0;
Sujithe8324352009-01-16 21:38:42 +0530431 bf_next = bf->bf_next;
432
Felix Fietkau78c46532010-06-25 01:26:16 +0200433 skb = bf->bf_mpdu;
434 tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100435 fi = get_frame_info(skb);
Felix Fietkau78c46532010-06-25 01:26:16 +0200436
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100437 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
Sujithe8324352009-01-16 21:38:42 +0530438 /* transmit completion, subframe is
439 * acked by block ack */
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530440 acked_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530441 } else if (!isaggr && txok) {
442 /* transmit completion */
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530443 acked_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530444 } else {
Felix Fietkau55195412011-04-17 23:28:09 +0200445 if ((tid->state & AGGR_CLEANUP) || !retry) {
Sujithe8324352009-01-16 21:38:42 +0530446 /*
447 * cleanup in progress, just fail
448 * the un-acked sub-frames
449 */
450 txfail = 1;
Felix Fietkau55195412011-04-17 23:28:09 +0200451 } else if (fi->retries < ATH_MAX_SW_RETRIES) {
452 if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
453 !an->sleeping)
454 ath_tx_set_retry(sc, txq, bf->bf_mpdu);
455
456 clear_filter = true;
457 txpending = 1;
458 } else {
459 bf->bf_state.bf_type |= BUF_XRETRY;
460 txfail = 1;
461 sendbar = 1;
462 txfail_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530463 }
464 }
465
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400466 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
467 bf_next == NULL) {
Vasanthakumar Thiagarajancbfe89c2009-06-24 18:58:47 +0530468 /*
469 * Make sure the last desc is reclaimed if it
470 * not a holding desc.
471 */
472 if (!bf_last->bf_stale)
473 list_move_tail(&bf->list, &bf_head);
474 else
475 INIT_LIST_HEAD(&bf_head);
Sujithe8324352009-01-16 21:38:42 +0530476 } else {
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -0700477 BUG_ON(list_empty(bf_q));
Sujithd43f30152009-01-16 21:38:53 +0530478 list_move_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +0530479 }
480
Felix Fietkau90fa5392010-09-20 13:45:38 +0200481 if (!txpending || (tid->state & AGGR_CLEANUP)) {
Sujithe8324352009-01-16 21:38:42 +0530482 /*
483 * complete the acked-ones/xretried ones; update
484 * block-ack window
485 */
486 spin_lock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100487 ath_tx_update_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +0530488 spin_unlock_bh(&txq->axq_lock);
489
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530490 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
Felix Fietkau78c46532010-06-25 01:26:16 +0200491 memcpy(tx_info->control.rates, rates, sizeof(rates));
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100492 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530493 rc_update = false;
494 } else {
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100495 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530496 }
497
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700498 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
499 !txfail, sendbar);
Sujithe8324352009-01-16 21:38:42 +0530500 } else {
Sujithd43f30152009-01-16 21:38:53 +0530501 /* retry the un-acked ones */
Felix Fietkau55195412011-04-17 23:28:09 +0200502 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400503 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
504 if (bf->bf_next == NULL && bf_last->bf_stale) {
505 struct ath_buf *tbf;
Sujithe8324352009-01-16 21:38:42 +0530506
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400507 tbf = ath_clone_txbuf(sc, bf_last);
508 /*
509 * Update tx baw and complete the
510 * frame with failed status if we
511 * run out of tx buf.
512 */
513 if (!tbf) {
514 spin_lock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100515 ath_tx_update_baw(sc, tid, fi->seqno);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400516 spin_unlock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajanc41d92d2009-07-14 20:17:11 -0400517
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400518 bf->bf_state.bf_type |=
519 BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100520 ath_tx_rc_status(sc, bf, ts, nframes,
Felix Fietkaub572d032010-11-14 15:20:07 +0100521 nbad, 0, false);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400522 ath_tx_complete_buf(sc, bf, txq,
523 &bf_head,
524 ts, 0, 0);
525 break;
526 }
527
528 ath9k_hw_cleartxdesc(sc->sc_ah,
529 tbf->bf_desc);
530 list_add_tail(&tbf->list, &bf_head);
531 } else {
532 /*
533 * Clear descriptor status words for
534 * software retry
535 */
536 ath9k_hw_cleartxdesc(sc->sc_ah,
537 bf->bf_desc);
Vasanthakumar Thiagarajanc41d92d2009-07-14 20:17:11 -0400538 }
Sujithe8324352009-01-16 21:38:42 +0530539 }
540
541 /*
542 * Put this buffer to the temporary pending
543 * queue to retain ordering
544 */
545 list_splice_tail_init(&bf_head, &bf_pending);
546 }
547
548 bf = bf_next;
549 }
550
Felix Fietkau4cee7862010-07-23 03:53:16 +0200551 /* prepend un-acked frames to the beginning of the pending frame queue */
552 if (!list_empty(&bf_pending)) {
Felix Fietkau55195412011-04-17 23:28:09 +0200553 if (an->sleeping)
554 ieee80211_sta_set_tim(sta);
555
Felix Fietkau4cee7862010-07-23 03:53:16 +0200556 spin_lock_bh(&txq->axq_lock);
Felix Fietkau55195412011-04-17 23:28:09 +0200557 if (clear_filter)
558 tid->ac->clear_ps_filter = true;
Felix Fietkau4cee7862010-07-23 03:53:16 +0200559 list_splice(&bf_pending, &tid->buf_q);
560 ath_tx_queue_tid(txq, tid);
561 spin_unlock_bh(&txq->axq_lock);
562 }
563
Sujithe8324352009-01-16 21:38:42 +0530564 if (tid->state & AGGR_CLEANUP) {
Felix Fietkau90fa5392010-09-20 13:45:38 +0200565 ath_tx_flush_tid(sc, tid);
566
Sujithe8324352009-01-16 21:38:42 +0530567 if (tid->baw_head == tid->baw_tail) {
568 tid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithe8324352009-01-16 21:38:42 +0530569 tid->state &= ~AGGR_CLEANUP;
Sujithd43f30152009-01-16 21:38:53 +0530570 }
Sujithe8324352009-01-16 21:38:42 +0530571 }
572
Sujith1286ec62009-01-27 13:30:37 +0530573 rcu_read_unlock();
574
Vivek Natarajanbdd62c02011-01-27 14:45:10 +0530575 if (needreset) {
576 spin_unlock_bh(&sc->sc_pcu_lock);
Sujithe8324352009-01-16 21:38:42 +0530577 ath_reset(sc, false);
Vivek Natarajanbdd62c02011-01-27 14:45:10 +0530578 spin_lock_bh(&sc->sc_pcu_lock);
579 }
Sujithe8324352009-01-16 21:38:42 +0530580}
581
582static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
583 struct ath_atx_tid *tid)
584{
Sujithe8324352009-01-16 21:38:42 +0530585 struct sk_buff *skb;
586 struct ieee80211_tx_info *tx_info;
587 struct ieee80211_tx_rate *rates;
Sujithd43f30152009-01-16 21:38:53 +0530588 u32 max_4ms_framelen, frmlen;
Sujith4ef70842009-07-23 15:32:41 +0530589 u16 aggr_limit, legacy = 0;
Sujithe8324352009-01-16 21:38:42 +0530590 int i;
591
Sujitha22be222009-03-30 15:28:36 +0530592 skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +0530593 tx_info = IEEE80211_SKB_CB(skb);
594 rates = tx_info->control.rates;
Sujithe8324352009-01-16 21:38:42 +0530595
596 /*
597 * Find the lowest frame length among the rate series that will have a
598 * 4ms transmit duration.
599 * TODO - TXOP limit needs to be considered.
600 */
601 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
602
603 for (i = 0; i < 4; i++) {
604 if (rates[i].count) {
Felix Fietkau545750d2009-11-23 22:21:01 +0100605 int modeidx;
606 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
Sujithe8324352009-01-16 21:38:42 +0530607 legacy = 1;
608 break;
609 }
610
Felix Fietkau0e668cd2010-04-19 19:57:32 +0200611 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
Felix Fietkau545750d2009-11-23 22:21:01 +0100612 modeidx = MCS_HT40;
613 else
Felix Fietkau0e668cd2010-04-19 19:57:32 +0200614 modeidx = MCS_HT20;
615
616 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
617 modeidx++;
Felix Fietkau545750d2009-11-23 22:21:01 +0100618
619 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
Sujithd43f30152009-01-16 21:38:53 +0530620 max_4ms_framelen = min(max_4ms_framelen, frmlen);
Sujithe8324352009-01-16 21:38:42 +0530621 }
622 }
623
624 /*
625 * limit aggregate size by the minimum rate if rate selected is
626 * not a probe rate, if rate selected is a probe rate then
627 * avoid aggregation of this packet.
628 */
629 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
630 return 0;
631
Vasanthakumar Thiagarajan17739122009-08-26 21:08:50 +0530632 if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
633 aggr_limit = min((max_4ms_framelen * 3) / 8,
634 (u32)ATH_AMPDU_LIMIT_MAX);
635 else
636 aggr_limit = min(max_4ms_framelen,
637 (u32)ATH_AMPDU_LIMIT_MAX);
Sujithe8324352009-01-16 21:38:42 +0530638
639 /*
640 * h/w can accept aggregates upto 16 bit lengths (65535).
641 * The IE, however can hold upto 65536, which shows up here
642 * as zero. Ignore 65536 since we are constrained by hw.
643 */
Sujith4ef70842009-07-23 15:32:41 +0530644 if (tid->an->maxampdu)
645 aggr_limit = min(aggr_limit, tid->an->maxampdu);
Sujithe8324352009-01-16 21:38:42 +0530646
647 return aggr_limit;
648}
649
650/*
Sujithd43f30152009-01-16 21:38:53 +0530651 * Returns the number of delimiters to be added to
Sujithe8324352009-01-16 21:38:42 +0530652 * meet the minimum required mpdudensity.
Sujithe8324352009-01-16 21:38:42 +0530653 */
654static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
655 struct ath_buf *bf, u16 frmlen)
656{
Sujithe8324352009-01-16 21:38:42 +0530657 struct sk_buff *skb = bf->bf_mpdu;
658 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujith4ef70842009-07-23 15:32:41 +0530659 u32 nsymbits, nsymbols;
Sujithe8324352009-01-16 21:38:42 +0530660 u16 minlen;
Felix Fietkau545750d2009-11-23 22:21:01 +0100661 u8 flags, rix;
Felix Fietkauc6663872010-04-19 19:57:33 +0200662 int width, streams, half_gi, ndelim, mindelim;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100663 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530664
665 /* Select standard number of delimiters based on frame length alone */
666 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
667
668 /*
669 * If encryption enabled, hardware requires some more padding between
670 * subframes.
671 * TODO - this could be improved to be dependent on the rate.
672 * The hardware can keep up at lower rates, but not higher rates
673 */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100674 if (fi->keyix != ATH9K_TXKEYIX_INVALID)
Sujithe8324352009-01-16 21:38:42 +0530675 ndelim += ATH_AGGR_ENCRYPTDELIM;
676
677 /*
678 * Convert desired mpdu density from microeconds to bytes based
679 * on highest rate in rate series (i.e. first rate) to determine
680 * required minimum length for subframe. Take into account
681 * whether high rate is 20 or 40Mhz and half or full GI.
Sujith4ef70842009-07-23 15:32:41 +0530682 *
Sujithe8324352009-01-16 21:38:42 +0530683 * If there is no mpdu density restriction, no further calculation
684 * is needed.
685 */
Sujith4ef70842009-07-23 15:32:41 +0530686
687 if (tid->an->mpdudensity == 0)
Sujithe8324352009-01-16 21:38:42 +0530688 return ndelim;
689
690 rix = tx_info->control.rates[0].idx;
691 flags = tx_info->control.rates[0].flags;
Sujithe8324352009-01-16 21:38:42 +0530692 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
693 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
694
695 if (half_gi)
Sujith4ef70842009-07-23 15:32:41 +0530696 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
Sujithe8324352009-01-16 21:38:42 +0530697 else
Sujith4ef70842009-07-23 15:32:41 +0530698 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
Sujithe8324352009-01-16 21:38:42 +0530699
700 if (nsymbols == 0)
701 nsymbols = 1;
702
Felix Fietkauc6663872010-04-19 19:57:33 +0200703 streams = HT_RC_2_STREAMS(rix);
704 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Sujithe8324352009-01-16 21:38:42 +0530705 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
706
Sujithe8324352009-01-16 21:38:42 +0530707 if (frmlen < minlen) {
Sujithe8324352009-01-16 21:38:42 +0530708 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
709 ndelim = max(mindelim, ndelim);
710 }
711
712 return ndelim;
713}
714
715static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
Sujithfec247c2009-07-27 12:08:16 +0530716 struct ath_txq *txq,
Sujithd43f30152009-01-16 21:38:53 +0530717 struct ath_atx_tid *tid,
Felix Fietkau269c44b2010-11-14 15:20:06 +0100718 struct list_head *bf_q,
719 int *aggr_len)
Sujithe8324352009-01-16 21:38:42 +0530720{
721#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
Sujithd43f30152009-01-16 21:38:53 +0530722 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
723 int rl = 0, nframes = 0, ndelim, prev_al = 0;
Sujithe8324352009-01-16 21:38:42 +0530724 u16 aggr_limit = 0, al = 0, bpad = 0,
725 al_delta, h_baw = tid->baw_size / 2;
726 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
Felix Fietkau0299a502010-10-21 02:47:24 +0200727 struct ieee80211_tx_info *tx_info;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100728 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +0530729
730 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
731
732 do {
733 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100734 fi = get_frame_info(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +0530735
Sujithd43f30152009-01-16 21:38:53 +0530736 /* do not step over block-ack window */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100737 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
Sujithe8324352009-01-16 21:38:42 +0530738 status = ATH_AGGR_BAW_CLOSED;
739 break;
740 }
741
742 if (!rl) {
743 aggr_limit = ath_lookup_rate(sc, bf, tid);
744 rl = 1;
745 }
746
Sujithd43f30152009-01-16 21:38:53 +0530747 /* do not exceed aggregation limit */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100748 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
Sujithe8324352009-01-16 21:38:42 +0530749
Sujithd43f30152009-01-16 21:38:53 +0530750 if (nframes &&
751 (aggr_limit < (al + bpad + al_delta + prev_al))) {
Sujithe8324352009-01-16 21:38:42 +0530752 status = ATH_AGGR_LIMITED;
753 break;
754 }
755
Felix Fietkau0299a502010-10-21 02:47:24 +0200756 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
757 if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
758 !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
759 break;
760
Sujithd43f30152009-01-16 21:38:53 +0530761 /* do not exceed subframe limit */
762 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
Sujithe8324352009-01-16 21:38:42 +0530763 status = ATH_AGGR_LIMITED;
764 break;
765 }
Sujithd43f30152009-01-16 21:38:53 +0530766 nframes++;
Sujithe8324352009-01-16 21:38:42 +0530767
Sujithd43f30152009-01-16 21:38:53 +0530768 /* add padding for previous frame to aggregation length */
Sujithe8324352009-01-16 21:38:42 +0530769 al += bpad + al_delta;
770
771 /*
772 * Get the delimiters needed to meet the MPDU
773 * density for this node.
774 */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100775 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +0530776 bpad = PADBYTES(al_delta) + (ndelim << 2);
777
778 bf->bf_next = NULL;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -0400779 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
Sujithe8324352009-01-16 21:38:42 +0530780
Sujithd43f30152009-01-16 21:38:53 +0530781 /* link buffers of this frame to the aggregate */
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100782 if (!fi->retries)
783 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithd43f30152009-01-16 21:38:53 +0530784 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
785 list_move_tail(&bf->list, bf_q);
Sujithe8324352009-01-16 21:38:42 +0530786 if (bf_prev) {
787 bf_prev->bf_next = bf;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -0400788 ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
789 bf->bf_daddr);
Sujithe8324352009-01-16 21:38:42 +0530790 }
791 bf_prev = bf;
Sujithfec247c2009-07-27 12:08:16 +0530792
Sujithe8324352009-01-16 21:38:42 +0530793 } while (!list_empty(&tid->buf_q));
794
Felix Fietkau269c44b2010-11-14 15:20:06 +0100795 *aggr_len = al;
Sujithd43f30152009-01-16 21:38:53 +0530796
Sujithe8324352009-01-16 21:38:42 +0530797 return status;
798#undef PADBYTES
799}
800
801static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
802 struct ath_atx_tid *tid)
803{
Sujithd43f30152009-01-16 21:38:53 +0530804 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +0530805 enum ATH_AGGR_STATUS status;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100806 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +0530807 struct list_head bf_q;
Felix Fietkau269c44b2010-11-14 15:20:06 +0100808 int aggr_len;
Sujithe8324352009-01-16 21:38:42 +0530809
810 do {
811 if (list_empty(&tid->buf_q))
812 return;
813
814 INIT_LIST_HEAD(&bf_q);
815
Felix Fietkau269c44b2010-11-14 15:20:06 +0100816 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
Sujithe8324352009-01-16 21:38:42 +0530817
818 /*
Sujithd43f30152009-01-16 21:38:53 +0530819 * no frames picked up to be aggregated;
820 * block-ack window is not open.
Sujithe8324352009-01-16 21:38:42 +0530821 */
822 if (list_empty(&bf_q))
823 break;
824
825 bf = list_first_entry(&bf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +0530826 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +0530827
Felix Fietkau55195412011-04-17 23:28:09 +0200828 if (tid->ac->clear_ps_filter) {
829 tid->ac->clear_ps_filter = false;
830 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
831 }
832
Sujithd43f30152009-01-16 21:38:53 +0530833 /* if only one frame, send as non-aggregate */
Felix Fietkaub572d032010-11-14 15:20:07 +0100834 if (bf == bf->bf_lastbf) {
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100835 fi = get_frame_info(bf->bf_mpdu);
836
Sujithe8324352009-01-16 21:38:42 +0530837 bf->bf_state.bf_type &= ~BUF_AGGR;
Sujithd43f30152009-01-16 21:38:53 +0530838 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100839 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +0530840 ath_tx_txqaddbuf(sc, txq, &bf_q);
841 continue;
842 }
843
Sujithd43f30152009-01-16 21:38:53 +0530844 /* setup first desc of aggregate */
Sujithe8324352009-01-16 21:38:42 +0530845 bf->bf_state.bf_type |= BUF_AGGR;
Felix Fietkau269c44b2010-11-14 15:20:06 +0100846 ath_buf_set_rate(sc, bf, aggr_len);
847 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);
Sujithe8324352009-01-16 21:38:42 +0530848
Sujithd43f30152009-01-16 21:38:53 +0530849 /* anchor last desc of aggregate */
850 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
Sujithe8324352009-01-16 21:38:42 +0530851
Sujithe8324352009-01-16 21:38:42 +0530852 ath_tx_txqaddbuf(sc, txq, &bf_q);
Sujithfec247c2009-07-27 12:08:16 +0530853 TX_STAT_INC(txq->axq_qnum, a_aggr);
Sujithe8324352009-01-16 21:38:42 +0530854
Felix Fietkau4b3ba662010-12-17 00:57:00 +0100855 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
Sujithe8324352009-01-16 21:38:42 +0530856 status != ATH_AGGR_BAW_CLOSED);
857}
858
Felix Fietkau231c3a12010-09-20 19:35:28 +0200859int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
860 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +0530861{
862 struct ath_atx_tid *txtid;
863 struct ath_node *an;
864
865 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +0530866 txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau231c3a12010-09-20 19:35:28 +0200867
868 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
869 return -EAGAIN;
870
Sujithf83da962009-07-23 15:32:37 +0530871 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200872 txtid->paused = true;
Felix Fietkau49447f22011-01-10 17:05:48 -0700873 *ssn = txtid->seq_start = txtid->seq_next;
Felix Fietkau231c3a12010-09-20 19:35:28 +0200874
Felix Fietkau2ed72222011-01-10 17:05:49 -0700875 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
876 txtid->baw_head = txtid->baw_tail = 0;
877
Felix Fietkau231c3a12010-09-20 19:35:28 +0200878 return 0;
Sujithe8324352009-01-16 21:38:42 +0530879}
880
Sujithf83da962009-07-23 15:32:37 +0530881void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
Sujithe8324352009-01-16 21:38:42 +0530882{
883 struct ath_node *an = (struct ath_node *)sta->drv_priv;
884 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau066dae92010-11-07 14:59:39 +0100885 struct ath_txq *txq = txtid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530886
887 if (txtid->state & AGGR_CLEANUP)
Sujithf83da962009-07-23 15:32:37 +0530888 return;
Sujithe8324352009-01-16 21:38:42 +0530889
890 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
Vasanthakumar Thiagarajan5eae6592009-06-09 15:28:21 +0530891 txtid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithf83da962009-07-23 15:32:37 +0530892 return;
Sujithe8324352009-01-16 21:38:42 +0530893 }
894
Sujithe8324352009-01-16 21:38:42 +0530895 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200896 txtid->paused = true;
Felix Fietkau90fa5392010-09-20 13:45:38 +0200897
898 /*
899 * If frames are still being transmitted for this TID, they will be
900 * cleaned up during tx completion. To prevent race conditions, this
901 * TID can only be reused after all in-progress subframes have been
902 * completed.
903 */
904 if (txtid->baw_head != txtid->baw_tail)
905 txtid->state |= AGGR_CLEANUP;
906 else
907 txtid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithd43f30152009-01-16 21:38:53 +0530908 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530909
Felix Fietkau90fa5392010-09-20 13:45:38 +0200910 ath_tx_flush_tid(sc, txtid);
Sujithe8324352009-01-16 21:38:42 +0530911}
912
Felix Fietkau55195412011-04-17 23:28:09 +0200913bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
914{
915 struct ath_atx_tid *tid;
916 struct ath_atx_ac *ac;
917 struct ath_txq *txq;
918 bool buffered = false;
919 int tidno;
920
921 for (tidno = 0, tid = &an->tid[tidno];
922 tidno < WME_NUM_TID; tidno++, tid++) {
923
924 if (!tid->sched)
925 continue;
926
927 ac = tid->ac;
928 txq = ac->txq;
929
930 spin_lock_bh(&txq->axq_lock);
931
932 if (!list_empty(&tid->buf_q))
933 buffered = true;
934
935 tid->sched = false;
936 list_del(&tid->list);
937
938 if (ac->sched) {
939 ac->sched = false;
940 list_del(&ac->list);
941 }
942
943 spin_unlock_bh(&txq->axq_lock);
944 }
945
946 return buffered;
947}
948
949void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
950{
951 struct ath_atx_tid *tid;
952 struct ath_atx_ac *ac;
953 struct ath_txq *txq;
954 int tidno;
955
956 for (tidno = 0, tid = &an->tid[tidno];
957 tidno < WME_NUM_TID; tidno++, tid++) {
958
959 ac = tid->ac;
960 txq = ac->txq;
961
962 spin_lock_bh(&txq->axq_lock);
963 ac->clear_ps_filter = true;
964
965 if (!list_empty(&tid->buf_q) && !tid->paused) {
966 ath_tx_queue_tid(txq, tid);
967 ath_txq_schedule(sc, txq);
968 }
969
970 spin_unlock_bh(&txq->axq_lock);
971 }
972}
973
Sujithe8324352009-01-16 21:38:42 +0530974void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
975{
976 struct ath_atx_tid *txtid;
977 struct ath_node *an;
978
979 an = (struct ath_node *)sta->drv_priv;
980
981 if (sc->sc_flags & SC_OP_TXAGGR) {
982 txtid = ATH_AN_2_TID(an, tid);
983 txtid->baw_size =
984 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
985 txtid->state |= AGGR_ADDBA_COMPLETE;
986 txtid->state &= ~AGGR_ADDBA_PROGRESS;
987 ath_tx_resume_tid(sc, txtid);
988 }
989}
990
Sujithe8324352009-01-16 21:38:42 +0530991/********************/
992/* Queue Management */
993/********************/
994
Sujithe8324352009-01-16 21:38:42 +0530995static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
996 struct ath_txq *txq)
997{
998 struct ath_atx_ac *ac, *ac_tmp;
999 struct ath_atx_tid *tid, *tid_tmp;
1000
1001 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1002 list_del(&ac->list);
1003 ac->sched = false;
1004 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1005 list_del(&tid->list);
1006 tid->sched = false;
1007 ath_tid_drain(sc, txq, tid);
1008 }
1009 }
1010}
1011
1012struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1013{
Sujithcbe61d82009-02-09 13:27:12 +05301014 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001015 struct ath_common *common = ath9k_hw_common(ah);
Sujithe8324352009-01-16 21:38:42 +05301016 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001017 static const int subtype_txq_to_hwq[] = {
1018 [WME_AC_BE] = ATH_TXQ_AC_BE,
1019 [WME_AC_BK] = ATH_TXQ_AC_BK,
1020 [WME_AC_VI] = ATH_TXQ_AC_VI,
1021 [WME_AC_VO] = ATH_TXQ_AC_VO,
1022 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001023 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301024
1025 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001026 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301027 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1028 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1029 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1030 qi.tqi_physCompBuf = 0;
1031
1032 /*
1033 * Enable interrupts only for EOL and DESC conditions.
1034 * We mark tx descriptors to receive a DESC interrupt
1035 * when a tx queue gets deep; otherwise waiting for the
1036 * EOL to reap descriptors. Note that this is done to
1037 * reduce interrupt load and this only defers reaping
1038 * descriptors, never transmitting frames. Aside from
1039 * reducing interrupts this also permits more concurrency.
1040 * The only potential downside is if the tx queue backs
1041 * up in which case the top half of the kernel may backup
1042 * due to a lack of tx descriptors.
1043 *
1044 * The UAPSD queue is an exception, since we take a desc-
1045 * based intr on the EOSP frames.
1046 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001047 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1048 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1049 TXQ_FLAG_TXERRINT_ENABLE;
1050 } else {
1051 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1052 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1053 else
1054 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1055 TXQ_FLAG_TXDESCINT_ENABLE;
1056 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001057 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1058 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301059 /*
1060 * NB: don't print a message, this happens
1061 * normally on parts with too few tx queues
1062 */
1063 return NULL;
1064 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001065 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
Joe Perches38002762010-12-02 19:12:36 -08001066 ath_err(common, "qnum %u out of range, max %zu!\n",
Ben Greear60f2d1d2011-01-09 23:11:52 -08001067 axq_qnum, ARRAY_SIZE(sc->tx.txq));
1068 ath9k_hw_releasetxqueue(ah, axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301069 return NULL;
1070 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001071 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1072 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301073
Ben Greear60f2d1d2011-01-09 23:11:52 -08001074 txq->axq_qnum = axq_qnum;
1075 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301076 txq->axq_link = NULL;
1077 INIT_LIST_HEAD(&txq->axq_q);
1078 INIT_LIST_HEAD(&txq->axq_acq);
1079 spin_lock_init(&txq->axq_lock);
1080 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001081 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001082 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001083 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001084
1085 txq->txq_headidx = txq->txq_tailidx = 0;
1086 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1087 INIT_LIST_HEAD(&txq->txq_fifo[i]);
1088 INIT_LIST_HEAD(&txq->txq_fifo_pending);
Sujithe8324352009-01-16 21:38:42 +05301089 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001090 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301091}
1092
Sujithe8324352009-01-16 21:38:42 +05301093int ath_txq_update(struct ath_softc *sc, int qnum,
1094 struct ath9k_tx_queue_info *qinfo)
1095{
Sujithcbe61d82009-02-09 13:27:12 +05301096 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301097 int error = 0;
1098 struct ath9k_tx_queue_info qi;
1099
1100 if (qnum == sc->beacon.beaconq) {
1101 /*
1102 * XXX: for beacon queue, we just save the parameter.
1103 * It will be picked up by ath_beaconq_config when
1104 * it's necessary.
1105 */
1106 sc->beacon.beacon_qi = *qinfo;
1107 return 0;
1108 }
1109
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001110 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301111
1112 ath9k_hw_get_txq_props(ah, qnum, &qi);
1113 qi.tqi_aifs = qinfo->tqi_aifs;
1114 qi.tqi_cwmin = qinfo->tqi_cwmin;
1115 qi.tqi_cwmax = qinfo->tqi_cwmax;
1116 qi.tqi_burstTime = qinfo->tqi_burstTime;
1117 qi.tqi_readyTime = qinfo->tqi_readyTime;
1118
1119 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001120 ath_err(ath9k_hw_common(sc->sc_ah),
1121 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301122 error = -EIO;
1123 } else {
1124 ath9k_hw_resettxqueue(ah, qnum);
1125 }
1126
1127 return error;
1128}
1129
1130int ath_cabq_update(struct ath_softc *sc)
1131{
1132 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001133 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301134 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301135
1136 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1137 /*
1138 * Ensure the readytime % is within the bounds.
1139 */
Sujith17d79042009-02-09 13:27:03 +05301140 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1141 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1142 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1143 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301144
Steve Brown9814f6b2011-02-07 17:10:39 -07001145 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301146 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301147 ath_txq_update(sc, qnum, &qi);
1148
1149 return 0;
1150}
1151
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001152static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1153{
1154 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1155 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1156}
1157
Sujith043a0402009-01-16 21:38:47 +05301158/*
1159 * Drain a given TX queue (could be Beacon or Data)
1160 *
1161 * This assumes output has been stopped and
1162 * we do not need to block ath_tx_tasklet.
1163 */
1164void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301165{
1166 struct ath_buf *bf, *lastbf;
1167 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001168 struct ath_tx_status ts;
1169
1170 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301171 INIT_LIST_HEAD(&bf_head);
1172
Sujithe8324352009-01-16 21:38:42 +05301173 for (;;) {
1174 spin_lock_bh(&txq->axq_lock);
1175
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001176 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1177 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1178 txq->txq_headidx = txq->txq_tailidx = 0;
1179 spin_unlock_bh(&txq->axq_lock);
1180 break;
1181 } else {
1182 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1183 struct ath_buf, list);
1184 }
1185 } else {
1186 if (list_empty(&txq->axq_q)) {
1187 txq->axq_link = NULL;
1188 spin_unlock_bh(&txq->axq_lock);
1189 break;
1190 }
1191 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1192 list);
Sujithe8324352009-01-16 21:38:42 +05301193
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001194 if (bf->bf_stale) {
1195 list_del(&bf->list);
1196 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301197
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001198 ath_tx_return_buffer(sc, bf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001199 continue;
1200 }
Sujithe8324352009-01-16 21:38:42 +05301201 }
1202
1203 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05301204
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001205 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1206 list_cut_position(&bf_head,
1207 &txq->txq_fifo[txq->txq_tailidx],
1208 &lastbf->list);
1209 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1210 } else {
1211 /* remove ath_buf's of the same mpdu from txq */
1212 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1213 }
1214
Sujithe8324352009-01-16 21:38:42 +05301215 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001216 if (bf_is_ampdu_not_probing(bf))
1217 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301218 spin_unlock_bh(&txq->axq_lock);
1219
1220 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001221 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1222 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301223 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001224 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +05301225 }
1226
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001227 spin_lock_bh(&txq->axq_lock);
1228 txq->axq_tx_inprogress = false;
1229 spin_unlock_bh(&txq->axq_lock);
1230
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001231 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1232 spin_lock_bh(&txq->axq_lock);
1233 while (!list_empty(&txq->txq_fifo_pending)) {
1234 bf = list_first_entry(&txq->txq_fifo_pending,
1235 struct ath_buf, list);
1236 list_cut_position(&bf_head,
1237 &txq->txq_fifo_pending,
1238 &bf->bf_lastbf->list);
1239 spin_unlock_bh(&txq->axq_lock);
1240
1241 if (bf_isampdu(bf))
1242 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
Felix Fietkauc5992612010-11-14 15:20:09 +01001243 &ts, 0, retry_tx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001244 else
1245 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1246 &ts, 0, 0);
1247 spin_lock_bh(&txq->axq_lock);
1248 }
1249 spin_unlock_bh(&txq->axq_lock);
1250 }
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001251
1252 /* flush any pending frames if aggregation is enabled */
1253 if (sc->sc_flags & SC_OP_TXAGGR) {
1254 if (!retry_tx) {
1255 spin_lock_bh(&txq->axq_lock);
1256 ath_txq_drain_pending_buffers(sc, txq);
1257 spin_unlock_bh(&txq->axq_lock);
1258 }
1259 }
Sujithe8324352009-01-16 21:38:42 +05301260}
1261
Felix Fietkau080e1a22010-12-05 20:17:53 +01001262bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301263{
Sujithcbe61d82009-02-09 13:27:12 +05301264 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001265 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301266 struct ath_txq *txq;
1267 int i, npend = 0;
1268
1269 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001270 return true;
Sujith043a0402009-01-16 21:38:47 +05301271
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001272 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301273
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001274 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301275 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001276 if (!ATH_TXQ_SETUP(sc, i))
1277 continue;
1278
1279 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301280 }
1281
Felix Fietkau080e1a22010-12-05 20:17:53 +01001282 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001283 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301284
1285 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001286 if (!ATH_TXQ_SETUP(sc, i))
1287 continue;
1288
1289 /*
1290 * The caller will resume queues with ieee80211_wake_queues.
1291 * Mark the queue as not stopped to prevent ath_tx_complete
1292 * from waking the queue too early.
1293 */
1294 txq = &sc->tx.txq[i];
1295 txq->stopped = false;
1296 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301297 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001298
1299 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301300}
1301
Sujithe8324352009-01-16 21:38:42 +05301302void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1303{
1304 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1305 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1306}
1307
Ben Greear7755bad2011-01-18 17:30:00 -08001308/* For each axq_acq entry, for each tid, try to schedule packets
1309 * for transmit until ampdu_depth has reached min Q depth.
1310 */
Sujithe8324352009-01-16 21:38:42 +05301311void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1312{
Ben Greear7755bad2011-01-18 17:30:00 -08001313 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1314 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301315
Felix Fietkau21f28e62011-01-15 14:30:14 +01001316 if (list_empty(&txq->axq_acq) ||
1317 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301318 return;
1319
1320 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001321 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301322
Ben Greear7755bad2011-01-18 17:30:00 -08001323 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1324 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1325 list_del(&ac->list);
1326 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301327
Ben Greear7755bad2011-01-18 17:30:00 -08001328 while (!list_empty(&ac->tid_q)) {
1329 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1330 list);
1331 list_del(&tid->list);
1332 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301333
Ben Greear7755bad2011-01-18 17:30:00 -08001334 if (tid->paused)
1335 continue;
Sujithe8324352009-01-16 21:38:42 +05301336
Ben Greear7755bad2011-01-18 17:30:00 -08001337 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301338
Ben Greear7755bad2011-01-18 17:30:00 -08001339 /*
1340 * add tid to round-robin queue if more frames
1341 * are pending for the tid
1342 */
1343 if (!list_empty(&tid->buf_q))
1344 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301345
Ben Greear7755bad2011-01-18 17:30:00 -08001346 if (tid == last_tid ||
1347 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1348 break;
Sujithe8324352009-01-16 21:38:42 +05301349 }
Ben Greear7755bad2011-01-18 17:30:00 -08001350
1351 if (!list_empty(&ac->tid_q)) {
1352 if (!ac->sched) {
1353 ac->sched = true;
1354 list_add_tail(&ac->list, &txq->axq_acq);
1355 }
1356 }
1357
1358 if (ac == last_ac ||
1359 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1360 return;
Sujithe8324352009-01-16 21:38:42 +05301361 }
1362}
1363
Sujithe8324352009-01-16 21:38:42 +05301364/***********/
1365/* TX, DMA */
1366/***********/
1367
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001368/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001369 * Insert a chain of ath_buf (descriptors) on a txq; the caller must have
1370 * already chained the descriptors together.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001371 */
Sujith102e0572008-10-29 10:15:16 +05301372static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1373 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001374{
Sujithcbe61d82009-02-09 13:27:12 +05301375 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001376 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001377 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +05301378
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001379 /*
1380 * Insert the frame on the outbound list and
1381 * pass it on to the hardware.
1382 */
1383
1384 if (list_empty(head))
1385 return;
1386
1387 bf = list_first_entry(head, struct ath_buf, list);
1388
Joe Perches226afe62010-12-02 19:12:37 -08001389 ath_dbg(common, ATH_DBG_QUEUE,
1390 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001391
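	/*
	 * EDMA chips (AR9003 family) feed frames through a per-queue TX FIFO;
	 * when the FIFO is full the chain is parked on txq_fifo_pending and
	 * pushed later from the completion path. Older chips link descriptors
	 * via axq_link and kick the queue with ath9k_hw_txstart().
	 */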
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001392 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1393 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1394 list_splice_tail_init(head, &txq->txq_fifo_pending);
1395 return;
1396 }
1397 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
Joe Perches226afe62010-12-02 19:12:37 -08001398 ath_dbg(common, ATH_DBG_XMIT,
1399 "Initializing tx fifo %d which is non-empty\n",
1400 txq->txq_headidx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001401 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1402 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1403 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001404 TX_STAT_INC(txq->axq_qnum, puttxbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001405 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001406 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1407 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001408 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001409 list_splice_tail_init(head, &txq->axq_q);
1410
1411 if (txq->axq_link == NULL) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001412 TX_STAT_INC(txq->axq_qnum, puttxbuf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001413 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001414 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1415 txq->axq_qnum, ito64(bf->bf_daddr),
1416 bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001417 } else {
1418 *txq->axq_link = bf->bf_daddr;
Joe Perches226afe62010-12-02 19:12:37 -08001419 ath_dbg(common, ATH_DBG_XMIT,
1420 "link[%u] (%p)=%llx (%p)\n",
1421 txq->axq_qnum, txq->axq_link,
1422 ito64(bf->bf_daddr), bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001423 }
1424 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1425 &txq->axq_link);
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001426 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001427 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001428 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001429 txq->axq_depth++;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001430 if (bf_is_ampdu_not_probing(bf))
1431 txq->axq_ampdu_depth++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001432}
1433
Sujithe8324352009-01-16 21:38:42 +05301434static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001435 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301436{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001437 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001438 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301439
Sujithe8324352009-01-16 21:38:42 +05301440 bf->bf_state.bf_type |= BUF_AMPDU;
1441
1442 /*
1443 * Do not queue to h/w when any of the following conditions is true:
1444 * - there are pending frames in software queue
1445 * - the TID is currently paused for ADDBA/BAR request
1446 * - seqno is not within block-ack window
1447 * - h/w queue depth exceeds low water mark
1448 */
1449 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001450 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001451 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001452 /*
Sujithe8324352009-01-16 21:38:42 +05301453 * Add this frame to software queue for scheduling later
1454 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001455 */
Ben Greearbda8add2011-01-09 23:11:48 -08001456 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau04caf862010-11-14 15:20:12 +01001457 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301458 ath_tx_queue_tid(txctl->txq, tid);
1459 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001460 }
1461
Felix Fietkau04caf862010-11-14 15:20:12 +01001462 INIT_LIST_HEAD(&bf_head);
1463 list_add(&bf->list, &bf_head);
1464
Sujithe8324352009-01-16 21:38:42 +05301465 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001466 if (!fi->retries)
1467 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301468
1469 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001470 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301471 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001472 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau04caf862010-11-14 15:20:12 +01001473 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
Sujithc4288392008-11-18 09:09:30 +05301474}
1475
Felix Fietkau82b873a2010-11-11 03:18:37 +01001476static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1477 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001478 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001479{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001480 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301481 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001482
Sujithe8324352009-01-16 21:38:42 +05301483 bf = list_first_entry(bf_head, struct ath_buf, list);
1484 bf->bf_state.bf_type &= ~BUF_AMPDU;
1485
1486 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001487 if (tid)
1488 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301489
Sujithd43f30152009-01-16 21:38:53 +05301490 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001491 fi = get_frame_info(bf->bf_mpdu);
1492 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301493 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301494 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001495}
1496
Sujith528f0c62008-10-29 10:14:26 +05301497static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001498{
Sujith528f0c62008-10-29 10:14:26 +05301499 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001500 enum ath9k_pkt_type htype;
1501 __le16 fc;
1502
Sujith528f0c62008-10-29 10:14:26 +05301503 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001504 fc = hdr->frame_control;
1505
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001506 if (ieee80211_is_beacon(fc))
1507 htype = ATH9K_PKT_TYPE_BEACON;
1508 else if (ieee80211_is_probe_resp(fc))
1509 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1510 else if (ieee80211_is_atim(fc))
1511 htype = ATH9K_PKT_TYPE_ATIM;
1512 else if (ieee80211_is_pspoll(fc))
1513 htype = ATH9K_PKT_TYPE_PSPOLL;
1514 else
1515 htype = ATH9K_PKT_TYPE_NORMAL;
1516
1517 return htype;
1518}
1519
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001520static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1521 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301522{
Felix Fietkau9ac586152011-01-24 19:23:18 +01001523 struct ath_softc *sc = hw->priv;
Sujith528f0c62008-10-29 10:14:26 +05301524 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001525 struct ieee80211_sta *sta = tx_info->control.sta;
1526 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301527 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001528 struct ath_frame_info *fi = get_frame_info(skb);
Sujith528f0c62008-10-29 10:14:26 +05301529 struct ath_node *an;
1530 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001531 enum ath9k_key_type keytype;
1532 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001533 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301534
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001535 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301536
Sujith528f0c62008-10-29 10:14:26 +05301537 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001538 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1539 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001540
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001541 an = (struct ath_node *) sta->drv_priv;
1542 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1543
1544 /*
1545 * Override seqno set by upper layer with the one
1546 * in tx aggregation state.
1547 */
1548 tid = ATH_AN_2_TID(an, tidno);
1549 seqno = tid->seq_next;
1550 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1551 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1552 }
1553
1554 memset(fi, 0, sizeof(*fi));
1555 if (hw_key)
1556 fi->keyix = hw_key->hw_key_idx;
1557 else
1558 fi->keyix = ATH9K_TXKEYIX_INVALID;
1559 fi->keytype = keytype;
1560 fi->framelen = framelen;
1561 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301562}
1563
Felix Fietkau82b873a2010-11-11 03:18:37 +01001564static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301565{
1566 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1567 int flags = 0;
1568
Sujith528f0c62008-10-29 10:14:26 +05301569 flags |= ATH9K_TXDESC_INTREQ;
1570
1571 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1572 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301573
Felix Fietkau82b873a2010-11-11 03:18:37 +01001574 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001575 flags |= ATH9K_TXDESC_LDPC;
1576
Sujith528f0c62008-10-29 10:14:26 +05301577 return flags;
1578}
1579
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001580/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001581 * rix - rate index
1582 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1583 * width - 0 for 20 MHz, 1 for 40 MHz
1584 * half_gi - to use 4us v/s 3.6 us for symbol time
1585 */
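/*
 * For example (illustrative numbers): a 1500-byte MPDU at MCS 2 (QPSK 3/4,
 * single stream), 20 MHz, long GI gives nbits = 1500*8 + 22 = 12022,
 * nsymbits = 78, nsymbols = 155, so the data portion lasts
 * SYMBOL_TIME(155) = 620 us; adding L_STF + L_LTF + L_SIG + HT_SIG +
 * HT_STF + HT_LTF(1) = 36 us gives roughly 656 us.
 */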
Felix Fietkau269c44b2010-11-14 15:20:06 +01001586static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301587 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001588{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001589 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001590 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301591
1592 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001593 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001594 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001595 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001596 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1597
1598 if (!half_gi)
1599 duration = SYMBOL_TIME(nsymbols);
1600 else
1601 duration = SYMBOL_TIME_HALFGI(nsymbols);
1602
Sujithe63835b2008-11-18 09:07:53 +05301603	/* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001604 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301605
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001606 return duration;
1607}
1608
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301609u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1610{
1611 struct ath_hw *ah = sc->sc_ah;
1612 struct ath9k_channel *curchan = ah->curchan;
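	/*
	 * With APM enabled on a 5 GHz channel, reduce a three-chain mask (0x7)
	 * to two chains (0x3) for legacy rates and MCS0-15 (hw rate codes
	 * below 0x90); higher rate codes keep the full chainmask.
	 */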
1613 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1614 (curchan->channelFlags & CHANNEL_5GHZ) &&
1615 (chainmask == 0x7) && (rate < 0x90))
1616 return 0x3;
1617 else
1618 return chainmask;
1619}
1620
Felix Fietkau269c44b2010-11-14 15:20:06 +01001621static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001622{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001623 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001624 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301625 struct sk_buff *skb;
1626 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301627 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001628 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301629 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301630 int i, flags = 0;
1631 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301632 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301633
1634 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301635
Sujitha22be222009-03-30 15:28:36 +05301636 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301637 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301638 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301639 hdr = (struct ieee80211_hdr *)skb->data;
1640 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301641
Sujithc89424d2009-01-30 14:29:28 +05301642 /*
1643 * We check if Short Preamble is needed for the CTS rate by
1644 * checking the BSS's global flag.
1645 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1646 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001647 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1648 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301649 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001650 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001651
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001652 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001653 bool is_40, is_sgi, is_sp;
1654 int phy;
1655
Sujithe63835b2008-11-18 09:07:53 +05301656 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001657 continue;
1658
Sujitha8efee42008-11-18 09:07:30 +05301659 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301660 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001661
Felix Fietkau27032052010-01-17 21:08:50 +01001662 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1663 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301664 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001665 flags |= ATH9K_TXDESC_RTSENA;
1666 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1667 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1668 flags |= ATH9K_TXDESC_CTSENA;
1669 }
1670
Sujithc89424d2009-01-30 14:29:28 +05301671 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1672 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1673 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1674 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001675
Felix Fietkau545750d2009-11-23 22:21:01 +01001676 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1677 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1678 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1679
1680 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1681 /* MCS rates */
1682 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301683 series[i].ChSel = ath_txchainmask_reduction(sc,
1684 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001685 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001686 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001687 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1688 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001689 continue;
1690 }
1691
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301692 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001693 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1694 !(rate->flags & IEEE80211_RATE_ERP_G))
1695 phy = WLAN_RC_PHY_CCK;
1696 else
1697 phy = WLAN_RC_PHY_OFDM;
1698
1699 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1700 series[i].Rate = rate->hw_value;
1701 if (rate->hw_value_short) {
1702 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1703 series[i].Rate |= rate->hw_value_short;
1704 } else {
1705 is_sp = false;
1706 }
1707
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301708 if (bf->bf_state.bfs_paprd)
1709 series[i].ChSel = common->tx_chainmask;
1710 else
1711 series[i].ChSel = ath_txchainmask_reduction(sc,
1712 common->tx_chainmask, series[i].Rate);
1713
Felix Fietkau545750d2009-11-23 22:21:01 +01001714 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001715 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001716 }
1717
Felix Fietkau27032052010-01-17 21:08:50 +01001718 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001719 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001720 flags &= ~ATH9K_TXDESC_RTSENA;
1721
1722 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1723 if (flags & ATH9K_TXDESC_RTSENA)
1724 flags &= ~ATH9K_TXDESC_CTSENA;
1725
Sujithe63835b2008-11-18 09:07:53 +05301726 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301727 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1728 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301729 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301730 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301731
Sujith17d79042009-02-09 13:27:03 +05301732 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301733 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001734}
1735
Felix Fietkau82b873a2010-11-11 03:18:37 +01001736static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001737 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001738 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301739{
Felix Fietkau9ac586152011-01-24 19:23:18 +01001740 struct ath_softc *sc = hw->priv;
Felix Fietkau04caf862010-11-14 15:20:12 +01001741 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001742 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001743 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001744 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001745 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001746 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001747
1748 bf = ath_tx_get_buffer(sc);
1749 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001750 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001751 return NULL;
1752 }
Sujithe8324352009-01-16 21:38:42 +05301753
Sujithe8324352009-01-16 21:38:42 +05301754 ATH_TXBUF_RESET(bf);
1755
Felix Fietkau82b873a2010-11-11 03:18:37 +01001756 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301757 bf->bf_mpdu = skb;
1758
Ben Greearc1739eb32010-10-14 12:45:29 -07001759 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1760 skb->len, DMA_TO_DEVICE);
1761 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301762 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001763 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001764 ath_err(ath9k_hw_common(sc->sc_ah),
1765 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001766 ath_tx_return_buffer(sc, bf);
1767 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301768 }
1769
Sujithe8324352009-01-16 21:38:42 +05301770 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301771
1772 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001773 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301774
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001775 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1776 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301777
1778 ath9k_hw_filltxdesc(ah, ds,
1779 skb->len, /* segment length */
1780 true, /* first segment */
1781 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001782 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001783 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001784 txq->axq_qnum);
1785
1786
1787 return bf;
1788}
1789
1790/* FIXME: tx power */
1791static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1792 struct ath_tx_control *txctl)
1793{
1794 struct sk_buff *skb = bf->bf_mpdu;
1795 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1796 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001797 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001798 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001799 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301800
Sujithe8324352009-01-16 21:38:42 +05301801 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301802 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1803 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001804 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1805 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001806 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001807
Felix Fietkau066dae92010-11-07 14:59:39 +01001808 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001809 }
1810
1811 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001812 /*
1813 * Try aggregation if it's a unicast data frame
1814 * and the destination is HT capable.
1815 */
1816 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301817 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001818 INIT_LIST_HEAD(&bf_head);
1819 list_add_tail(&bf->list, &bf_head);
1820
Felix Fietkau61117f02010-11-11 03:18:36 +01001821 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001822 bf->bf_state.bfs_paprd = txctl->paprd;
1823
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001824 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001825 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1826 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001827
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301828 if (txctl->paprd)
1829 bf->bf_state.bfs_paprd_timestamp = jiffies;
1830
Felix Fietkau55195412011-04-17 23:28:09 +02001831 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1832 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
1833
Felix Fietkau248a38d2010-12-10 21:16:46 +01001834 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301835 }
1836
1837 spin_unlock_bh(&txctl->txq->axq_lock);
1838}
1839
1840/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001841int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301842 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001843{
Felix Fietkau28d16702010-11-14 15:20:10 +01001844 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1845 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001846 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001847 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001848 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001849 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001850 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001851 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001852 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001853
Ben Greeara9927ba2010-12-06 21:13:49 -08001854 /* NOTE: sta can be NULL according to net/mac80211.h */
1855 if (sta)
1856 txctl->an = (struct ath_node *)sta->drv_priv;
1857
Felix Fietkau04caf862010-11-14 15:20:12 +01001858 if (info->control.hw_key)
1859 frmlen += info->control.hw_key->icv_len;
1860
Felix Fietkau28d16702010-11-14 15:20:10 +01001861 /*
1862 * As a temporary workaround, assign seq# here; this will likely need
1863 * to be cleaned up to work better with Beacon transmission and virtual
1864 * BSSes.
1865 */
1866 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1867 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1868 sc->tx.seq_no += 0x10;
1869 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1870 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1871 }
1872
1873 /* Add the padding after the header if this is not already done */
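	/*
	 * e.g. with a standard QoS data header (26 bytes, an assumption about
	 * the frame format): padpos = 26 and padsize = 2, so two bytes are
	 * pushed at the front and the header is moved up, leaving a 2-byte gap
	 * between header and payload so the payload starts on a 4-byte
	 * boundary.
	 */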
1874 padpos = ath9k_cmn_padpos(hdr->frame_control);
1875 padsize = padpos & 3;
1876 if (padsize && skb->len > padpos) {
1877 if (skb_headroom(skb) < padsize)
1878 return -ENOMEM;
1879
1880 skb_push(skb, padsize);
1881 memmove(skb->data, skb->data + padsize, padpos);
1882 }
1883
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001884 setup_frame_info(hw, skb, frmlen);
1885
1886 /*
1887 * At this point, the vif, hw_key and sta pointers in the tx control
 1888	 * info are no longer valid (overwritten by the ath_frame_info data).
1889 */
1890
1891 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001892 if (unlikely(!bf))
1893 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001894
Felix Fietkau066dae92010-11-07 14:59:39 +01001895 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001896 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001897 if (txq == sc->tx.txq_map[q] &&
1898 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001899 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001900 txq->stopped = 1;
1901 }
1902 spin_unlock_bh(&txq->axq_lock);
1903
Sujithe8324352009-01-16 21:38:42 +05301904 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001905
1906 return 0;
1907}
1908
Sujithe8324352009-01-16 21:38:42 +05301909/*****************/
1910/* TX Completion */
1911/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001912
Sujithe8324352009-01-16 21:38:42 +05301913static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001914 int tx_flags, int ftype, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001915{
Sujithe8324352009-01-16 21:38:42 +05301916 struct ieee80211_hw *hw = sc->hw;
1917 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001918 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001919 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001920 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301921
Joe Perches226afe62010-12-02 19:12:37 -08001922 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301923
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301924 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301925 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301926
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301927 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301928 /* Frame was ACKed */
1929 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1930 }
1931
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001932 padpos = ath9k_cmn_padpos(hdr->frame_control);
1933 padsize = padpos & 3;
1934	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301935 /*
1936 * Remove MAC header padding before giving the frame back to
1937 * mac80211.
1938 */
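		/* Mirror of the padding added in ath_tx_start(). */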
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001939 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301940 skb_pull(skb, padsize);
1941 }
1942
Sujith1b04b932010-01-08 10:36:05 +05301943 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1944 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001945 ath_dbg(common, ATH_DBG_PS,
1946 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301947 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1948 PS_WAIT_FOR_CAB |
1949 PS_WAIT_FOR_PSPOLL_DATA |
1950 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001951 }
1952
Felix Fietkau7545daf2011-01-24 19:23:16 +01001953 q = skb_get_queue_mapping(skb);
1954 if (txq == sc->tx.txq_map[q]) {
1955 spin_lock_bh(&txq->axq_lock);
1956 if (WARN_ON(--txq->pending_frames < 0))
1957 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001958
Felix Fietkau7545daf2011-01-24 19:23:16 +01001959 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1960 ieee80211_wake_queue(sc->hw, q);
1961 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001962 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001963 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001964 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001965
1966 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301967}
1968
1969static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001970 struct ath_txq *txq, struct list_head *bf_q,
1971 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301972{
1973 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301974 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301975 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301976
Sujithe8324352009-01-16 21:38:42 +05301977 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301978 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301979
1980 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301981 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301982
1983 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301984 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301985 }
1986
Ben Greearc1739eb32010-10-14 12:45:29 -07001987 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001988 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001989
1990 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301991 if (time_after(jiffies,
1992 bf->bf_state.bfs_paprd_timestamp +
1993 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001994 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001995 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001996 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001997 } else {
Felix Fietkau5bec3e52011-01-24 21:29:25 +01001998 ath_debug_stat_tx(sc, bf, ts, txq);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001999 ath_tx_complete(sc, skb, tx_flags,
Felix Fietkau61117f02010-11-11 03:18:36 +01002000 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002001 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002002 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2003 * accidentally reference it later.
2004 */
2005 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302006
2007 /*
 2008	 * Return the list of ath_buf for this mpdu to the free queue
2009 */
2010 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2011 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2012 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2013}
2014
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002015static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2016 struct ath_tx_status *ts, int nframes, int nbad,
2017 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302018{
Sujitha22be222009-03-30 15:28:36 +05302019 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302020 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302021 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002022 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002023 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302024 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302025
Sujith95e4acb2009-03-13 08:56:09 +05302026 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002027 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302028
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002029 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302030 WARN_ON(tx_rateindex >= hw->max_rates);
2031
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002032 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302033 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02002034 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002035 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302036
Felix Fietkaub572d032010-11-14 15:20:07 +01002037 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002038
Felix Fietkaub572d032010-11-14 15:20:07 +01002039 tx_info->status.ampdu_len = nframes;
2040 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002041 }
2042
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002043 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302044 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002045 /*
2046 * If an underrun error is seen assume it as an excessive
2047 * retry only if max frame trigger level has been reached
2048 * (2 KB for single stream, and 4 KB for dual stream).
2049 * Adjust the long retry as if the frame was tried
2050 * hw->max_rate_tries times to affect how rate control updates
2051 * PER for the failed rate.
2052 * In case of congestion on the bus penalizing this type of
2053 * underruns should help hardware actually transmit new frames
2054 * successfully by eventually preferring slower rates.
2055 * This itself should also alleviate congestion on the bus.
2056 */
2057 if (ieee80211_is_data(hdr->frame_control) &&
2058 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2059 ATH9K_TX_DELIM_UNDERRUN)) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002060 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002061 tx_info->status.rates[tx_rateindex].count =
2062 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302063 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302064
Felix Fietkau545750d2009-11-23 22:21:01 +01002065 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302066 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002067 tx_info->status.rates[i].idx = -1;
2068 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302069
Felix Fietkau78c46532010-06-25 01:26:16 +02002070 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302071}
2072
Sujithc4288392008-11-18 09:09:30 +05302073static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002074{
Sujithcbe61d82009-02-09 13:27:12 +05302075 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002076 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002077 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2078 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302079 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002080 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302081 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002082 int status;
2083
Joe Perches226afe62010-12-02 19:12:37 -08002084 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2085 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2086 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002087
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002088 for (;;) {
2089 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002090 if (list_empty(&txq->axq_q)) {
2091 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002092 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002093 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002094 spin_unlock_bh(&txq->axq_lock);
2095 break;
2096 }
2097 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2098
2099 /*
 2100		 * There is a race condition where a BH gets scheduled
 2101		 * after sw writes TxE and before hw re-loads the last
 2102		 * descriptor to pick up the newly chained one.
2103 * Software must keep the last DONE descriptor as a
2104 * holding descriptor - software does so by marking
2105 * it with the STALE flag.
2106 */
2107 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302108 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002109 bf_held = bf;
2110 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302111 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002112 break;
2113 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002114 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302115 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002116 }
2117 }
2118
2119 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302120 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002121
Felix Fietkau29bffa92010-03-29 20:14:23 -07002122 memset(&ts, 0, sizeof(ts));
2123 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002124 if (status == -EINPROGRESS) {
2125 spin_unlock_bh(&txq->axq_lock);
2126 break;
2127 }
Ben Greear2dac4fb2011-01-09 23:11:45 -08002128 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002129
2130 /*
2131 * Remove ath_buf's of the same transmit unit from txq,
2132 * however leave the last descriptor back as the holding
2133 * descriptor for hw.
2134 */
Sujitha119cc42009-03-30 15:28:38 +05302135 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002136 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002137 if (!list_is_singular(&lastbf->list))
2138 list_cut_position(&bf_head,
2139 &txq->axq_q, lastbf->list.prev);
2140
2141 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002142 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002143 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002144 if (bf_held)
2145 list_del(&bf_held->list);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002146
2147 if (bf_is_ampdu_not_probing(bf))
2148 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajan69081622011-02-19 01:13:42 -08002149
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002150 spin_unlock_bh(&txq->axq_lock);
2151
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002152 if (bf_held)
2153 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002154
Sujithcd3d39a2008-08-11 14:03:34 +05302155 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002156 /*
2157 * This frame is sent out as a single frame.
2158 * Use hardware retry status for this frame.
2159 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002160 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302161 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002162 ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002163 }
Johannes Berge6a98542008-10-21 12:40:02 +02002164
Sujithcd3d39a2008-08-11 14:03:34 +05302165 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002166 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2167 true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002168 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002169 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002170
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002171 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002172
Felix Fietkau86271e42011-03-11 21:38:19 +01002173 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174 ath_txq_schedule(sc, txq);
2175 spin_unlock_bh(&txq->axq_lock);
2176 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177}
2178
Vivek Natarajan181fb182011-01-27 14:45:08 +05302179static void ath_hw_pll_work(struct work_struct *work)
2180{
2181 struct ath_softc *sc = container_of(work, struct ath_softc,
2182 hw_pll_work.work);
2183 static int count;
2184
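	/*
	 * AR9485 only: sample the PLL sqsum every 200 ms (HZ/5); three
	 * consecutive readings at or above 0x40000 are taken to mean Rx has
	 * been hung for more than half a second, and the chip is reset.
	 */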
2185 if (AR_SREV_9485(sc->sc_ah)) {
2186 if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
2187 count++;
2188
2189 if (count == 3) {
2190 /* Rx is hung for more than 500ms. Reset it */
2191 ath_reset(sc, true);
2192 count = 0;
2193 }
2194 } else
2195 count = 0;
2196
2197 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
2198 }
2199}
2200
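/*
 * TX completion watchdog: runs every ATH_TX_COMPLETE_POLL_INT ms. If a
 * queue still holds frames and no completion has been processed since the
 * previous run (axq_tx_inprogress is still set), the DMA engine is assumed
 * to be hung and the chip is reset.
 */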
Sujith305fe472009-07-23 15:32:29 +05302201static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002202{
2203 struct ath_softc *sc = container_of(work, struct ath_softc,
2204 tx_complete_work.work);
2205 struct ath_txq *txq;
2206 int i;
2207 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002208#ifdef CONFIG_ATH9K_DEBUGFS
2209 sc->tx_complete_poll_work_seen++;
2210#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002211
2212 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2213 if (ATH_TXQ_SETUP(sc, i)) {
2214 txq = &sc->tx.txq[i];
2215 spin_lock_bh(&txq->axq_lock);
2216 if (txq->axq_depth) {
2217 if (txq->axq_tx_inprogress) {
2218 needreset = true;
2219 spin_unlock_bh(&txq->axq_lock);
2220 break;
2221 } else {
2222 txq->axq_tx_inprogress = true;
2223 }
2224 }
2225 spin_unlock_bh(&txq->axq_lock);
2226 }
2227
2228 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002229 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2230 "tx hung, resetting the chip\n");
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002231 ath_reset(sc, true);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002232 }
2233
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002234 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002235 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2236}
2237
2238
Sujithe8324352009-01-16 21:38:42 +05302239
2240void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002241{
Sujithe8324352009-01-16 21:38:42 +05302242 int i;
2243 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002244
Sujithe8324352009-01-16 21:38:42 +05302245 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002246
2247 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302248 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2249 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002250 }
2251}
2252
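/*
 * EDMA (AR9003 family) completion path: TX status is read from the shared
 * status ring (ath9k_hw_txprocdesc() is called with a NULL descriptor), and
 * txs.qid identifies the hardware queue the completed frame belongs to.
 */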
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002253void ath_tx_edma_tasklet(struct ath_softc *sc)
2254{
2255 struct ath_tx_status txs;
2256 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2257 struct ath_hw *ah = sc->sc_ah;
2258 struct ath_txq *txq;
2259 struct ath_buf *bf, *lastbf;
2260 struct list_head bf_head;
2261 int status;
2262 int txok;
2263
2264 for (;;) {
2265 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2266 if (status == -EINPROGRESS)
2267 break;
2268 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002269 ath_dbg(common, ATH_DBG_XMIT,
2270 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002271 break;
2272 }
2273
2274 /* Skip beacon completions */
2275 if (txs.qid == sc->beacon.beaconq)
2276 continue;
2277
2278 txq = &sc->tx.txq[txs.qid];
2279
2280 spin_lock_bh(&txq->axq_lock);
2281 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2282 spin_unlock_bh(&txq->axq_lock);
2283 return;
2284 }
2285
2286 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2287 struct ath_buf, list);
2288 lastbf = bf->bf_lastbf;
2289
2290 INIT_LIST_HEAD(&bf_head);
2291 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2292 &lastbf->list);
2293 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2294 txq->axq_depth--;
2295 txq->axq_tx_inprogress = false;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002296 if (bf_is_ampdu_not_probing(bf))
2297 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002298 spin_unlock_bh(&txq->axq_lock);
2299
2300 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2301
2302 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002303 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2304 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002305 ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002306 }
2307
2308 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002309 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2310 txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002311 else
2312 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2313 &txs, txok, 0);
2314
2315 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002316
Felix Fietkau86271e42011-03-11 21:38:19 +01002317 if (!list_empty(&txq->txq_fifo_pending)) {
2318 INIT_LIST_HEAD(&bf_head);
2319 bf = list_first_entry(&txq->txq_fifo_pending,
2320 struct ath_buf, list);
2321 list_cut_position(&bf_head,
2322 &txq->txq_fifo_pending,
2323 &bf->bf_lastbf->list);
2324 ath_tx_txqaddbuf(sc, txq, &bf_head);
2325 } else if (sc->sc_flags & SC_OP_TXAGGR)
2326 ath_txq_schedule(sc, txq);
2327
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002328 spin_unlock_bh(&txq->axq_lock);
2329 }
2330}
2331
Sujithe8324352009-01-16 21:38:42 +05302332/*****************/
2333/* Init, Cleanup */
2334/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002335
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002336static int ath_txstatus_setup(struct ath_softc *sc, int size)
2337{
2338 struct ath_descdma *dd = &sc->txsdma;
2339 u8 txs_len = sc->sc_ah->caps.txs_len;
2340
2341 dd->dd_desc_len = size * txs_len;
2342 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2343 &dd->dd_desc_paddr, GFP_KERNEL);
2344 if (!dd->dd_desc)
2345 return -ENOMEM;
2346
2347 return 0;
2348}
2349
2350static int ath_tx_edma_init(struct ath_softc *sc)
2351{
2352 int err;
2353
2354 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2355 if (!err)
2356 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2357 sc->txsdma.dd_desc_paddr,
2358 ATH_TXSTATUS_RING_SIZE);
2359
2360 return err;
2361}
2362
2363static void ath_tx_edma_cleanup(struct ath_softc *sc)
2364{
2365 struct ath_descdma *dd = &sc->txsdma;
2366
2367 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2368 dd->dd_desc_paddr);
2369}
2370
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002371int ath_tx_init(struct ath_softc *sc, int nbufs)
2372{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002373 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002374 int error = 0;
2375
Sujith797fe5cb2009-03-30 15:28:45 +05302376 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002377
Sujith797fe5cb2009-03-30 15:28:45 +05302378 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002379 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302380 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002381 ath_err(common,
2382 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302383 goto err;
2384 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002385
Sujith797fe5cb2009-03-30 15:28:45 +05302386 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002387 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302388 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002389 ath_err(common,
2390 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302391 goto err;
2392 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002393
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002394 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
Vivek Natarajan181fb182011-01-27 14:45:08 +05302395 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002396
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002397 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2398 error = ath_tx_edma_init(sc);
2399 if (error)
2400 goto err;
2401 }
2402
Sujith797fe5cb2009-03-30 15:28:45 +05302403err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002404 if (error != 0)
2405 ath_tx_cleanup(sc);
2406
2407 return error;
2408}
2409
Sujith797fe5cb2009-03-30 15:28:45 +05302410void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002411{
Sujithb77f4832008-12-07 21:44:03 +05302412 if (sc->beacon.bdma.dd_desc_len != 0)
2413 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002414
Sujithb77f4832008-12-07 21:44:03 +05302415 if (sc->tx.txdma.dd_desc_len != 0)
2416 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002417
2418 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2419 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002420}
2421
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002422void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2423{
Sujithc5170162008-10-29 10:13:59 +05302424 struct ath_atx_tid *tid;
2425 struct ath_atx_ac *ac;
2426 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002427
Sujith8ee5afb2008-12-07 21:43:36 +05302428 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302429 tidno < WME_NUM_TID;
2430 tidno++, tid++) {
2431 tid->an = an;
2432 tid->tidno = tidno;
2433 tid->seq_start = tid->seq_next = 0;
2434 tid->baw_size = WME_MAX_BA;
2435 tid->baw_head = tid->baw_tail = 0;
2436 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302437 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302438 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302439 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302440 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302441 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302442 tid->state &= ~AGGR_ADDBA_COMPLETE;
2443 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302444 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002445
Sujith8ee5afb2008-12-07 21:43:36 +05302446 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302447 acno < WME_NUM_AC; acno++, ac++) {
2448 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002449 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302450 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002451 }
2452}
2453
Sujithb5aa9bf2008-10-29 10:13:31 +05302454void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002455{
Felix Fietkau2b409942010-07-07 19:42:08 +02002456 struct ath_atx_ac *ac;
2457 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002458 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002459 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302460
Felix Fietkau2b409942010-07-07 19:42:08 +02002461 for (tidno = 0, tid = &an->tid[tidno];
2462 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002463
Felix Fietkau2b409942010-07-07 19:42:08 +02002464 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002465 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002466
Felix Fietkau2b409942010-07-07 19:42:08 +02002467 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002468
Felix Fietkau2b409942010-07-07 19:42:08 +02002469 if (tid->sched) {
2470 list_del(&tid->list);
2471 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002472 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002473
2474 if (ac->sched) {
2475 list_del(&ac->list);
2476 tid->ac->sched = false;
2477 }
2478
2479 ath_tid_drain(sc, txq, tid);
2480 tid->state &= ~AGGR_ADDBA_COMPLETE;
2481 tid->state &= ~AGGR_CLEANUP;
2482
2483 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002484 }
2485}