/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
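
/*
 * The symbol helpers above assume 4 us per OFDM symbol with the regular
 * guard interval and roughly 3.6 us with the short ("half") GI, rounded
 * to whole symbols. For example, NUM_SYMBOLS_PER_USEC(8) = 2 and
 * NUM_SYMBOLS_PER_USEC_HALFGI(8) = (8 * 5 - 4) / 18 = 2 symbols.
 */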
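/*
 * Data bits carried per OFDM symbol for a single spatial stream, indexed
 * by MCS (0-7) and channel width (20/40 MHz). Rates above MCS 7 reuse
 * these entries via rix % 8 and are scaled by HT_RC_2_STREAMS(); e.g.
 * MCS 15 at 40 MHz yields 540 * 2 = 1080 bits per symbol.
 */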
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

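/*
 * Upper bound on aggregate size, indexed by [HT mode][MCS]: roughly the
 * number of bytes that can be transmitted in about 4 ms at the given
 * rate, capped at 65532 to stay within the hardware's 16-bit aggregate
 * length limit. ath_lookup_rate() picks the smallest entry across a
 * frame's rate series.
 */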
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

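/*
 * Block-ack window (BAW) tracking: tid->tx_buf is a bitmap with one bit
 * per outstanding subframe, tid->seq_start is the current window start
 * and ATH_BA_INDEX() gives a sequence number's offset into the window.
 * Completing a subframe clears its bit; the window then slides forward
 * past any contiguous run of completed entries at the head.
 */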
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

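/*
 * Walk the subframe chain (bf->bf_next) once and count how many frames
 * it contains and how many of them were not acknowledged. For an
 * aggregate, a subframe counts as bad when its bit is missing from the
 * block-ack bitmap reported in the tx status; for a failed transmission
 * every subframe is bad.
 */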
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

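/*
 * Aggregate completion: look up the destination station and TID, then
 * walk every subframe of the aggregate. Subframes covered by the
 * block-ack bitmap are completed and released from the BAW; the rest
 * are either software-retried (up to ATH_MAX_SW_RETRIES, re-queued at
 * the head of the TID queue to preserve ordering) or failed, in which
 * case a BAR may be sent to move the receiver's window forward.
 */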
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) && retry) {
				if (fi->retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset) {
		spin_unlock_bh(&sc->sc_pcu_lock);
		ath_reset(sc, false);
		spin_lock_bh(&sc->sc_pcu_lock);
	}
}

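/*
 * Pick the aggregate size limit for this TID based on the frame's rate
 * series: a legacy (non-MCS) rate or a rate-control probe disables
 * aggregation entirely, otherwise the limit is the smallest 4 ms frame
 * length over the series, reduced to 3/8 when Bluetooth coexistence has
 * priority and clamped to the peer's advertised maximum A-MPDU size.
 */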
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *	  The hardware can keep up at lower rates, but not higher rates
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

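/*
 * Build one aggregate from the head of the TID queue. Frames are added
 * while their sequence numbers stay inside the block-ack window, the
 * running length (including per-subframe padding and delimiters) stays
 * under the rate-derived limit, and the subframe count stays under
 * min(half the BAW size, ATH_AMPDU_SUBFRAME_DEFAULT). Subframes are
 * chained through bf_next and their descriptors linked for the hardware.
 */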
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

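/*
 * ADDBA session bookkeeping: ath_tx_aggr_start() marks the TID with
 * AGGR_ADDBA_PROGRESS and pauses it until the session is established,
 * ath_tx_aggr_resume() sets AGGR_ADDBA_COMPLETE and unpauses it, and
 * ath_tx_aggr_stop() either tears the state down immediately or defers
 * via AGGR_CLEANUP while subframes are still outstanding in the BAW.
 */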
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0, retry_tx);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

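/*
 * Stop DMA on the beacon queue and on every configured data queue, then
 * drain whatever is left. The return value is true only when the
 * hardware reported no descriptors still pending after the stop.
 */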
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!list_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
Sujith102e0572008-10-29 10:15:16 +05301298static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1299 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001300{
Sujithcbe61d82009-02-09 13:27:12 +05301301 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001302 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001303 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +05301304
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001305 /*
1306 * Insert the frame on the outbound list and
1307 * pass it on to the hardware.
1308 */
1309
1310 if (list_empty(head))
1311 return;
1312
1313 bf = list_first_entry(head, struct ath_buf, list);
1314
Joe Perches226afe62010-12-02 19:12:37 -08001315 ath_dbg(common, ATH_DBG_QUEUE,
1316 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001317
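	/*
	 * Two hand-off paths (as coded below): EDMA chips push the chain
	 * into the per-queue TX FIFO, parking it on txq_fifo_pending when
	 * the FIFO is full; older chips append to axq_q, patch the previous
	 * descriptor's link pointer and, if the queue was idle, program
	 * TXDP with the new head and kick txstart.
	 */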
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001318 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1319 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1320 list_splice_tail_init(head, &txq->txq_fifo_pending);
1321 return;
1322 }
1323 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
Joe Perches226afe62010-12-02 19:12:37 -08001324 ath_dbg(common, ATH_DBG_XMIT,
1325 "Initializing tx fifo %d which is non-empty\n",
1326 txq->txq_headidx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001327 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1328 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1329 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001330 TX_STAT_INC(txq->axq_qnum, puttxbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001331 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001332 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1333 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001334 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001335 list_splice_tail_init(head, &txq->axq_q);
1336
1337 if (txq->axq_link == NULL) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001338 TX_STAT_INC(txq->axq_qnum, puttxbuf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001339 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001340 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1341 txq->axq_qnum, ito64(bf->bf_daddr),
1342 bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001343 } else {
1344 *txq->axq_link = bf->bf_daddr;
Joe Perches226afe62010-12-02 19:12:37 -08001345 ath_dbg(common, ATH_DBG_XMIT,
1346 "link[%u] (%p)=%llx (%p)\n",
1347 txq->axq_qnum, txq->axq_link,
1348 ito64(bf->bf_daddr), bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001349 }
1350 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1351 &txq->axq_link);
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001352 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001353 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001354 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001355 txq->axq_depth++;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001356 if (bf_is_ampdu_not_probing(bf))
1357 txq->axq_ampdu_depth++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001358}
1359
Sujithe8324352009-01-16 21:38:42 +05301360static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001361 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301362{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001363 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001364 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301365
Sujithe8324352009-01-16 21:38:42 +05301366 bf->bf_state.bf_type |= BUF_AMPDU;
1367
1368 /*
1369 * Do not queue to h/w when any of the following conditions is true:
1370 * - there are pending frames in software queue
1371 * - the TID is currently paused for ADDBA/BAR request
1372 * - seqno is not within block-ack window
1373 * - h/w queue depth exceeds low water mark
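	 *
	 * (The block-ack window check means, roughly, that with seq_start
	 * at 100 and baw_size 64, only seqnos 100..163 modulo
	 * IEEE80211_SEQ_MAX may be handed to the hardware now; later
	 * seqnos stay software-queued until the window moves.)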
1374 */
1375 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001376 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001377 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001378 /*
Sujithe8324352009-01-16 21:38:42 +05301379 * Add this frame to software queue for scheduling later
1380 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001381 */
Ben Greearbda8add2011-01-09 23:11:48 -08001382 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau04caf862010-11-14 15:20:12 +01001383 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301384 ath_tx_queue_tid(txctl->txq, tid);
1385 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001386 }
1387
Felix Fietkau04caf862010-11-14 15:20:12 +01001388 INIT_LIST_HEAD(&bf_head);
1389 list_add(&bf->list, &bf_head);
1390
Sujithe8324352009-01-16 21:38:42 +05301391 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001392 if (!fi->retries)
1393 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301394
1395 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001396 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301397 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001398 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau04caf862010-11-14 15:20:12 +01001399 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
Sujithc4288392008-11-18 09:09:30 +05301400}
1401
Felix Fietkau82b873a2010-11-11 03:18:37 +01001402static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1403 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001404 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001405{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001406 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301407 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001408
Sujithe8324352009-01-16 21:38:42 +05301409 bf = list_first_entry(bf_head, struct ath_buf, list);
1410 bf->bf_state.bf_type &= ~BUF_AMPDU;
1411
1412 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001413 if (tid)
1414 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301415
Sujithd43f30152009-01-16 21:38:53 +05301416 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001417 fi = get_frame_info(bf->bf_mpdu);
1418 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301419 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301420 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001421}
1422
Sujith528f0c62008-10-29 10:14:26 +05301423static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001424{
Sujith528f0c62008-10-29 10:14:26 +05301425 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001426 enum ath9k_pkt_type htype;
1427 __le16 fc;
1428
Sujith528f0c62008-10-29 10:14:26 +05301429 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001430 fc = hdr->frame_control;
1431
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001432 if (ieee80211_is_beacon(fc))
1433 htype = ATH9K_PKT_TYPE_BEACON;
1434 else if (ieee80211_is_probe_resp(fc))
1435 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1436 else if (ieee80211_is_atim(fc))
1437 htype = ATH9K_PKT_TYPE_ATIM;
1438 else if (ieee80211_is_pspoll(fc))
1439 htype = ATH9K_PKT_TYPE_PSPOLL;
1440 else
1441 htype = ATH9K_PKT_TYPE_NORMAL;
1442
1443 return htype;
1444}
1445
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001446static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1447 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301448{
Felix Fietkau9ac58612011-01-24 19:23:18 +01001449 struct ath_softc *sc = hw->priv;
Sujith528f0c62008-10-29 10:14:26 +05301450 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001451 struct ieee80211_sta *sta = tx_info->control.sta;
1452 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301453 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001454 struct ath_frame_info *fi = get_frame_info(skb);
Sujith528f0c62008-10-29 10:14:26 +05301455 struct ath_node *an;
1456 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001457 enum ath9k_key_type keytype;
1458 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001459 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301460
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001461 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301462
Sujith528f0c62008-10-29 10:14:26 +05301463 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001464 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1465 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001466
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001467 an = (struct ath_node *) sta->drv_priv;
1468 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1469
1470 /*
1471 * Override seqno set by upper layer with the one
1472 * in tx aggregation state.
1473 */
1474 tid = ATH_AN_2_TID(an, tidno);
1475 seqno = tid->seq_next;
1476 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1477 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1478 }
1479
1480 memset(fi, 0, sizeof(*fi));
1481 if (hw_key)
1482 fi->keyix = hw_key->hw_key_idx;
1483 else
1484 fi->keyix = ATH9K_TXKEYIX_INVALID;
1485 fi->keytype = keytype;
1486 fi->framelen = framelen;
1487 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301488}
1489
Felix Fietkau82b873a2010-11-11 03:18:37 +01001490static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301491{
1492 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1493 int flags = 0;
1494
1495 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1496 flags |= ATH9K_TXDESC_INTREQ;
1497
1498 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1499 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301500
Felix Fietkau82b873a2010-11-11 03:18:37 +01001501 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001502 flags |= ATH9K_TXDESC_LDPC;
1503
Sujith528f0c62008-10-29 10:14:26 +05301504 return flags;
1505}
1506
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001507/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001508 * rix - rate index
1509 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1510 * width - 0 for 20 MHz, 1 for 40 MHz
 1511 * half_gi - use 3.6 us symbol time (short GI) instead of 4 us
1512 */
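/*
 * A worked sanity check of the arithmetic below (assuming a rate with
 * 260 data bits per 20 MHz symbol, i.e. single-stream MCS7, and
 * OFDM_PLCP_BITS == 22): pktlen = 1500, long GI ->
 *   nbits    = 1500 * 8 + 22 = 12022
 *   nsymbols = ceil(12022 / 260) = 47
 *   duration = 47 * 4 us = 188 us, plus ~36 us of legacy/HT preamble
 *   fields for one stream, giving roughly 224 us of airtime.
 */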
Felix Fietkau269c44b2010-11-14 15:20:06 +01001513static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301514 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001515{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001516 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001517 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301518
1519 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001520 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001521 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001522 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001523 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1524
1525 if (!half_gi)
1526 duration = SYMBOL_TIME(nsymbols);
1527 else
1528 duration = SYMBOL_TIME_HALFGI(nsymbols);
1529
Sujithe63835b2008-11-18 09:07:53 +05301530	/* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001531 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301532
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001533 return duration;
1534}
1535
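/*
 * Presumably a power/regulatory tweak: with APM enabled on a 5 GHz
 * channel, drop from three chains (0x7) to two (0x3) for legacy and
 * low MCS rate codes (below 0x90); otherwise the caller's chainmask
 * is returned unchanged.
 */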
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301536u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1537{
1538 struct ath_hw *ah = sc->sc_ah;
1539 struct ath9k_channel *curchan = ah->curchan;
1540 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1541 (curchan->channelFlags & CHANNEL_5GHZ) &&
1542 (chainmask == 0x7) && (rate < 0x90))
1543 return 0x3;
1544 else
1545 return chainmask;
1546}
1547
Felix Fietkau269c44b2010-11-14 15:20:06 +01001548static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001549{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001550 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001551 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301552 struct sk_buff *skb;
1553 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301554 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001555 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301556 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301557 int i, flags = 0;
1558 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301559 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301560
1561 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301562
Sujitha22be222009-03-30 15:28:36 +05301563 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301564 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301565 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301566 hdr = (struct ieee80211_hdr *)skb->data;
1567 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301568
Sujithc89424d2009-01-30 14:29:28 +05301569 /*
1570 * We check if Short Preamble is needed for the CTS rate by
1571 * checking the BSS's global flag.
1572 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1573 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001574 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1575 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301576 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001577 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001578
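	/*
	 * Fill one ath9k_11n_rate_series entry per mac80211 rate-control
	 * attempt (up to four).  HT (MCS) entries get an airtime estimate
	 * from ath_pkt_duration(); legacy entries ask the hardware layer
	 * via ath9k_hw_computetxtime().  Unused slots keep Tries == 0.
	 */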
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001579 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001580 bool is_40, is_sgi, is_sp;
1581 int phy;
1582
Sujithe63835b2008-11-18 09:07:53 +05301583 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001584 continue;
1585
Sujitha8efee42008-11-18 09:07:30 +05301586 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301587 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001588
Felix Fietkau27032052010-01-17 21:08:50 +01001589 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1590 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301591 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001592 flags |= ATH9K_TXDESC_RTSENA;
1593 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1594 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1595 flags |= ATH9K_TXDESC_CTSENA;
1596 }
1597
Sujithc89424d2009-01-30 14:29:28 +05301598 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1599 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1600 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1601 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001602
Felix Fietkau545750d2009-11-23 22:21:01 +01001603 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1604 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1605 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1606
1607 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1608 /* MCS rates */
1609 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301610 series[i].ChSel = ath_txchainmask_reduction(sc,
1611 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001612 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001613 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001614 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1615 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001616 continue;
1617 }
1618
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301619 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001620 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1621 !(rate->flags & IEEE80211_RATE_ERP_G))
1622 phy = WLAN_RC_PHY_CCK;
1623 else
1624 phy = WLAN_RC_PHY_OFDM;
1625
1626 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1627 series[i].Rate = rate->hw_value;
1628 if (rate->hw_value_short) {
1629 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1630 series[i].Rate |= rate->hw_value_short;
1631 } else {
1632 is_sp = false;
1633 }
1634
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301635 if (bf->bf_state.bfs_paprd)
1636 series[i].ChSel = common->tx_chainmask;
1637 else
1638 series[i].ChSel = ath_txchainmask_reduction(sc,
1639 common->tx_chainmask, series[i].Rate);
1640
Felix Fietkau545750d2009-11-23 22:21:01 +01001641 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001642 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001643 }
1644
Felix Fietkau27032052010-01-17 21:08:50 +01001645 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001646 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001647 flags &= ~ATH9K_TXDESC_RTSENA;
1648
1649 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1650 if (flags & ATH9K_TXDESC_RTSENA)
1651 flags &= ~ATH9K_TXDESC_CTSENA;
1652
Sujithe63835b2008-11-18 09:07:53 +05301653 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301654 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1655 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301656 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301657 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301658
Sujith17d79042009-02-09 13:27:03 +05301659 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301660 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001661}
1662
Felix Fietkau82b873a2010-11-11 03:18:37 +01001663static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001664 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001665 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301666{
Felix Fietkau9ac58612011-01-24 19:23:18 +01001667 struct ath_softc *sc = hw->priv;
Felix Fietkau04caf862010-11-14 15:20:12 +01001668 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001669 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001670 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001671 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001672 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001673 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001674
1675 bf = ath_tx_get_buffer(sc);
1676 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001677 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001678 return NULL;
1679 }
Sujithe8324352009-01-16 21:38:42 +05301680
Sujithe8324352009-01-16 21:38:42 +05301681 ATH_TXBUF_RESET(bf);
1682
Felix Fietkau82b873a2010-11-11 03:18:37 +01001683 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301684 bf->bf_mpdu = skb;
1685
Ben Greearc1739eb2010-10-14 12:45:29 -07001686 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1687 skb->len, DMA_TO_DEVICE);
1688 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301689 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001690 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001691 ath_err(ath9k_hw_common(sc->sc_ah),
1692 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001693 ath_tx_return_buffer(sc, bf);
1694 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301695 }
1696
Sujithe8324352009-01-16 21:38:42 +05301697 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301698
1699 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001700 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301701
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001702 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1703 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301704
1705 ath9k_hw_filltxdesc(ah, ds,
1706 skb->len, /* segment length */
1707 true, /* first segment */
1708 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001709 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001710 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001711 txq->axq_qnum);
1712
1713
1714 return bf;
1715}
1716
1717/* FIXME: tx power */
1718static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1719 struct ath_tx_control *txctl)
1720{
1721 struct sk_buff *skb = bf->bf_mpdu;
1722 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1723 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001724 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001725 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001726 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301727
Sujithe8324352009-01-16 21:38:42 +05301728 spin_lock_bh(&txctl->txq->axq_lock);
1729
Felix Fietkau248a38d2010-12-10 21:16:46 +01001730 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001731 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1732 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001733 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001734
Felix Fietkau066dae92010-11-07 14:59:39 +01001735 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001736 }
1737
1738 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001739 /*
1740 * Try aggregation if it's a unicast data frame
1741 * and the destination is HT capable.
1742 */
1743 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301744 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001745 INIT_LIST_HEAD(&bf_head);
1746 list_add_tail(&bf->list, &bf_head);
1747
Felix Fietkau61117f02010-11-11 03:18:36 +01001748 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001749 bf->bf_state.bfs_paprd = txctl->paprd;
1750
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001751 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001752 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1753 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001754
Felix Fietkau248a38d2010-12-10 21:16:46 +01001755 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301756 }
1757
1758 spin_unlock_bh(&txctl->txq->axq_lock);
1759}
1760
1761/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001762int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301763 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001764{
Felix Fietkau28d16702010-11-14 15:20:10 +01001765 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1766 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001767 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkau9ac58612011-01-24 19:23:18 +01001768 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001769 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001770 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001771 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001772 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001773 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001774
Ben Greeara9927ba2010-12-06 21:13:49 -08001775 /* NOTE: sta can be NULL according to net/mac80211.h */
1776 if (sta)
1777 txctl->an = (struct ath_node *)sta->drv_priv;
1778
Felix Fietkau04caf862010-11-14 15:20:12 +01001779 if (info->control.hw_key)
1780 frmlen += info->control.hw_key->icv_len;
1781
Felix Fietkau28d16702010-11-14 15:20:10 +01001782 /*
1783 * As a temporary workaround, assign seq# here; this will likely need
1784 * to be cleaned up to work better with Beacon transmission and virtual
1785 * BSSes.
1786 */
1787 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1788 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1789 sc->tx.seq_no += 0x10;
1790 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1791 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1792 }
1793
1794 /* Add the padding after the header if this is not already done */
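	/*
	 * (The pad keeps the frame body 4-byte aligned, which is
	 * presumably what the DMA engine expects; padpos & 3 is the
	 * number of pad bytes required.)
	 */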
1795 padpos = ath9k_cmn_padpos(hdr->frame_control);
1796 padsize = padpos & 3;
1797 if (padsize && skb->len > padpos) {
1798 if (skb_headroom(skb) < padsize)
1799 return -ENOMEM;
1800
1801 skb_push(skb, padsize);
1802 memmove(skb->data, skb->data + padsize, padpos);
1803 }
1804
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001805 setup_frame_info(hw, skb, frmlen);
1806
1807 /*
1808 * At this point, the vif, hw_key and sta pointers in the tx control
 1809 * info are no longer valid (overwritten by the ath_frame_info data).
1810 */
1811
1812 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001813 if (unlikely(!bf))
1814 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001815
Felix Fietkau066dae92010-11-07 14:59:39 +01001816 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001817 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001818 if (txq == sc->tx.txq_map[q] &&
1819 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001820 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001821 txq->stopped = 1;
1822 }
1823 spin_unlock_bh(&txq->axq_lock);
1824
Sujithe8324352009-01-16 21:38:42 +05301825 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001826
1827 return 0;
1828}
1829
Sujithe8324352009-01-16 21:38:42 +05301830/*****************/
1831/* TX Completion */
1832/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001833
Sujithe8324352009-01-16 21:38:42 +05301834static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001835 int tx_flags, int ftype, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001836{
Sujithe8324352009-01-16 21:38:42 +05301837 struct ieee80211_hw *hw = sc->hw;
1838 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001839 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001840	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001841 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301842
Joe Perches226afe62010-12-02 19:12:37 -08001843 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301844
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301845 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301846 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301847
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301848 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301849 /* Frame was ACKed */
1850 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1851 }
1852
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001853 padpos = ath9k_cmn_padpos(hdr->frame_control);
1854 padsize = padpos & 3;
 1855	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301856 /*
1857 * Remove MAC header padding before giving the frame back to
1858 * mac80211.
1859 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001860 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301861 skb_pull(skb, padsize);
1862 }
1863
Sujith1b04b932010-01-08 10:36:05 +05301864 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1865 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001866 ath_dbg(common, ATH_DBG_PS,
1867 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301868 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1869 PS_WAIT_FOR_CAB |
1870 PS_WAIT_FOR_PSPOLL_DATA |
1871 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001872 }
1873
Felix Fietkau7545daf2011-01-24 19:23:16 +01001874 q = skb_get_queue_mapping(skb);
1875 if (txq == sc->tx.txq_map[q]) {
1876 spin_lock_bh(&txq->axq_lock);
1877 if (WARN_ON(--txq->pending_frames < 0))
1878 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001879
Felix Fietkau7545daf2011-01-24 19:23:16 +01001880 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1881 ieee80211_wake_queue(sc->hw, q);
1882 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001883 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001884 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001885 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001886
1887 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301888}
1889
1890static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001891 struct ath_txq *txq, struct list_head *bf_q,
1892 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301893{
1894 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301895 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301896 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301897
Sujithe8324352009-01-16 21:38:42 +05301898 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301899 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301900
1901 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301902 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301903
1904 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301905 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301906 }
1907
Ben Greearc1739eb2010-10-14 12:45:29 -07001908 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001909 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001910
1911 if (bf->bf_state.bfs_paprd) {
Felix Fietkau82259b72010-11-14 15:20:04 +01001912 if (!sc->paprd_pending)
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001913 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001914 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001915 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001916 } else {
Felix Fietkau5bec3e52011-01-24 21:29:25 +01001917 ath_debug_stat_tx(sc, bf, ts, txq);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001918 ath_tx_complete(sc, skb, tx_flags,
Felix Fietkau61117f02010-11-11 03:18:36 +01001919 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001920 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001921 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
1922 * accidentally reference it later.
1923 */
1924 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301925
1926 /*
 1927	 * Return the list of ath_buf for this mpdu to the free queue
1928 */
1929 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1930 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1931 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1932}
1933
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001934static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1935 struct ath_tx_status *ts, int nframes, int nbad,
1936 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301937{
Sujitha22be222009-03-30 15:28:36 +05301938 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301939 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301940 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001941 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001942 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301943 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301944
Sujith95e4acb2009-03-13 08:56:09 +05301945 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001946 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301947
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001948 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301949 WARN_ON(tx_rateindex >= hw->max_rates);
1950
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001951 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301952 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001953 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001954 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301955
Felix Fietkaub572d032010-11-14 15:20:07 +01001956 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02001957
Felix Fietkaub572d032010-11-14 15:20:07 +01001958 tx_info->status.ampdu_len = nframes;
1959 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02001960 }
1961
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001962 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301963 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001964 /*
 1965		 * If an underrun error is seen, treat it as an excessive
 1966		 * retry only if the max frame trigger level has been reached
1967 * (2 KB for single stream, and 4 KB for dual stream).
1968 * Adjust the long retry as if the frame was tried
1969 * hw->max_rate_tries times to affect how rate control updates
1970 * PER for the failed rate.
 1971		 * In case of congestion on the bus, penalizing this type of
 1972		 * underrun should help the hardware actually transmit new frames
1973 * successfully by eventually preferring slower rates.
1974 * This itself should also alleviate congestion on the bus.
1975 */
1976 if (ieee80211_is_data(hdr->frame_control) &&
1977 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1978 ATH9K_TX_DELIM_UNDERRUN)) &&
1979 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1980 tx_info->status.rates[tx_rateindex].count =
1981 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05301982 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301983
Felix Fietkau545750d2009-11-23 22:21:01 +01001984 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301985 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01001986 tx_info->status.rates[i].idx = -1;
1987 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301988
Felix Fietkau78c46532010-06-25 01:26:16 +02001989 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05301990}
1991
Sujithc4288392008-11-18 09:09:30 +05301992static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001993{
Sujithcbe61d82009-02-09 13:27:12 +05301994 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001995 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001996 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1997 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05301998 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07001999 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302000 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002001 int status;
2002
Joe Perches226afe62010-12-02 19:12:37 -08002003 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2004 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2005 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002006
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002007 for (;;) {
2008 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002009 if (list_empty(&txq->axq_q)) {
2010 txq->axq_link = NULL;
Ben Greear082f6532011-01-09 23:11:47 -08002011 if (sc->sc_flags & SC_OP_TXAGGR)
2012 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002013 spin_unlock_bh(&txq->axq_lock);
2014 break;
2015 }
2016 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2017
2018 /*
2019 * There is a race condition that a BH gets scheduled
 2020		 * after sw writes TxE and before hw re-loads the last
2021 * descriptor to get the newly chained one.
2022 * Software must keep the last DONE descriptor as a
2023 * holding descriptor - software does so by marking
2024 * it with the STALE flag.
2025 */
2026 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302027 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002028 bf_held = bf;
2029 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302030 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002031 break;
2032 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002033 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302034 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002035 }
2036 }
2037
2038 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302039 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002040
Felix Fietkau29bffa92010-03-29 20:14:23 -07002041 memset(&ts, 0, sizeof(ts));
2042 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002043 if (status == -EINPROGRESS) {
2044 spin_unlock_bh(&txq->axq_lock);
2045 break;
2046 }
Ben Greear2dac4fb2011-01-09 23:11:45 -08002047 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002048
2049 /*
 2050		 * Remove the ath_bufs of the same transmit unit from txq,
 2051		 * but leave the last descriptor behind as the holding
2052 * descriptor for hw.
2053 */
Sujitha119cc42009-03-30 15:28:38 +05302054 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002055 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002056 if (!list_is_singular(&lastbf->list))
2057 list_cut_position(&bf_head,
2058 &txq->axq_q, lastbf->list.prev);
2059
2060 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002061 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002062 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002063 if (bf_held)
2064 list_del(&bf_held->list);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002065
2066 if (bf_is_ampdu_not_probing(bf))
2067 txq->axq_ampdu_depth--;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002068 spin_unlock_bh(&txq->axq_lock);
2069
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002070 if (bf_held)
2071 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002072
Sujithcd3d39a2008-08-11 14:03:34 +05302073 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002074 /*
2075 * This frame is sent out as a single frame.
2076 * Use hardware retry status for this frame.
2077 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002078 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302079 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002080 ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002081 }
Johannes Berge6a98542008-10-21 12:40:02 +02002082
Sujithcd3d39a2008-08-11 14:03:34 +05302083 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002084 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2085 true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002086 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002087 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002088
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002089 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002090
Sujith672840a2008-08-11 14:05:08 +05302091 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002092 ath_txq_schedule(sc, txq);
2093 spin_unlock_bh(&txq->axq_lock);
2094 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002095}
2096
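/*
 * AR9485 PLL watchdog (a sketch of the logic below): sample the PLL
 * sqsum every HZ/5 (~200 ms); three consecutive out-of-range readings
 * are taken to mean the radio is stuck, and the chip is reset.
 */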
Vivek Natarajan181fb182011-01-27 14:45:08 +05302097static void ath_hw_pll_work(struct work_struct *work)
2098{
2099 struct ath_softc *sc = container_of(work, struct ath_softc,
2100 hw_pll_work.work);
2101 static int count;
2102
2103 if (AR_SREV_9485(sc->sc_ah)) {
2104 if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
2105 count++;
2106
2107 if (count == 3) {
2108 /* Rx is hung for more than 500ms. Reset it */
2109 ath_reset(sc, true);
2110 count = 0;
2111 }
2112 } else
2113 count = 0;
2114
2115 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
2116 }
2117}
2118
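/*
 * TX completion watchdog: runs every ATH_TX_COMPLETE_POLL_INT.  If a
 * queue still has descriptors pending and no completion has been seen
 * since the previous poll (axq_tx_inprogress already set), the chip is
 * assumed to be hung and a full reset is scheduled.
 */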
Sujith305fe472009-07-23 15:32:29 +05302119static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002120{
2121 struct ath_softc *sc = container_of(work, struct ath_softc,
2122 tx_complete_work.work);
2123 struct ath_txq *txq;
2124 int i;
2125 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002126#ifdef CONFIG_ATH9K_DEBUGFS
2127 sc->tx_complete_poll_work_seen++;
2128#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002129
2130 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2131 if (ATH_TXQ_SETUP(sc, i)) {
2132 txq = &sc->tx.txq[i];
2133 spin_lock_bh(&txq->axq_lock);
2134 if (txq->axq_depth) {
2135 if (txq->axq_tx_inprogress) {
2136 needreset = true;
2137 spin_unlock_bh(&txq->axq_lock);
2138 break;
2139 } else {
2140 txq->axq_tx_inprogress = true;
2141 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08002142 } else {
2143 /* If the queue has pending buffers, then it
2144 * should be doing tx work (and have axq_depth).
 2145				 * We shouldn't get to this state, I think,
 2146				 * but we do.
2147 */
2148 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
2149 (txq->pending_frames > 0 ||
2150 !list_empty(&txq->axq_acq) ||
2151 txq->stopped)) {
2152 ath_err(ath9k_hw_common(sc->sc_ah),
2153 "txq: %p axq_qnum: %u,"
2154 " mac80211_qnum: %i"
2155 " axq_link: %p"
2156 " pending frames: %i"
2157 " axq_acq empty: %i"
2158 " stopped: %i"
2159 " axq_depth: 0 Attempting to"
2160 " restart tx logic.\n",
2161 txq, txq->axq_qnum,
2162 txq->mac80211_qnum,
2163 txq->axq_link,
2164 txq->pending_frames,
2165 list_empty(&txq->axq_acq),
2166 txq->stopped);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002167 ath_txq_schedule(sc, txq);
2168 }
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002169 }
2170 spin_unlock_bh(&txq->axq_lock);
2171 }
2172
2173 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002174 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2175 "tx hung, resetting the chip\n");
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002176 ath_reset(sc, true);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002177 }
2178
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002179 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002180 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2181}
2182
2183
Sujithe8324352009-01-16 21:38:42 +05302184
2185void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002186{
Sujithe8324352009-01-16 21:38:42 +05302187 int i;
2188 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002189
Sujithe8324352009-01-16 21:38:42 +05302190 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002191
2192 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302193 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2194 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002195 }
2196}
2197
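/*
 * EDMA (AR9003-family) completion path: TX status entries are popped
 * from the dedicated status ring (set up in ath_tx_edma_init() below)
 * rather than read back from the frame descriptors, and each entry is
 * mapped back to its queue via txs.qid.
 */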
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002198void ath_tx_edma_tasklet(struct ath_softc *sc)
2199{
2200 struct ath_tx_status txs;
2201 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2202 struct ath_hw *ah = sc->sc_ah;
2203 struct ath_txq *txq;
2204 struct ath_buf *bf, *lastbf;
2205 struct list_head bf_head;
2206 int status;
2207 int txok;
2208
2209 for (;;) {
2210 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2211 if (status == -EINPROGRESS)
2212 break;
2213 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002214 ath_dbg(common, ATH_DBG_XMIT,
2215 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002216 break;
2217 }
2218
2219 /* Skip beacon completions */
2220 if (txs.qid == sc->beacon.beaconq)
2221 continue;
2222
2223 txq = &sc->tx.txq[txs.qid];
2224
2225 spin_lock_bh(&txq->axq_lock);
2226 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2227 spin_unlock_bh(&txq->axq_lock);
2228 return;
2229 }
2230
2231 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2232 struct ath_buf, list);
2233 lastbf = bf->bf_lastbf;
2234
2235 INIT_LIST_HEAD(&bf_head);
2236 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2237 &lastbf->list);
2238 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2239 txq->axq_depth--;
2240 txq->axq_tx_inprogress = false;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002241 if (bf_is_ampdu_not_probing(bf))
2242 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002243 spin_unlock_bh(&txq->axq_lock);
2244
2245 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2246
2247 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002248 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2249 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002250 ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002251 }
2252
2253 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002254 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2255 txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002256 else
2257 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2258 &txs, txok, 0);
2259
2260 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002261
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002262 if (!list_empty(&txq->txq_fifo_pending)) {
2263 INIT_LIST_HEAD(&bf_head);
2264 bf = list_first_entry(&txq->txq_fifo_pending,
2265 struct ath_buf, list);
2266 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2267 &bf->bf_lastbf->list);
2268 ath_tx_txqaddbuf(sc, txq, &bf_head);
2269 } else if (sc->sc_flags & SC_OP_TXAGGR)
2270 ath_txq_schedule(sc, txq);
2271 spin_unlock_bh(&txq->axq_lock);
2272 }
2273}
2274
Sujithe8324352009-01-16 21:38:42 +05302275/*****************/
2276/* Init, Cleanup */
2277/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002278
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002279static int ath_txstatus_setup(struct ath_softc *sc, int size)
2280{
2281 struct ath_descdma *dd = &sc->txsdma;
2282 u8 txs_len = sc->sc_ah->caps.txs_len;
2283
2284 dd->dd_desc_len = size * txs_len;
2285 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2286 &dd->dd_desc_paddr, GFP_KERNEL);
2287 if (!dd->dd_desc)
2288 return -ENOMEM;
2289
2290 return 0;
2291}
2292
2293static int ath_tx_edma_init(struct ath_softc *sc)
2294{
2295 int err;
2296
2297 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2298 if (!err)
2299 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2300 sc->txsdma.dd_desc_paddr,
2301 ATH_TXSTATUS_RING_SIZE);
2302
2303 return err;
2304}
2305
2306static void ath_tx_edma_cleanup(struct ath_softc *sc)
2307{
2308 struct ath_descdma *dd = &sc->txsdma;
2309
2310 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2311 dd->dd_desc_paddr);
2312}
2313
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002314int ath_tx_init(struct ath_softc *sc, int nbufs)
2315{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002316 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002317 int error = 0;
2318
Sujith797fe5c2009-03-30 15:28:45 +05302319 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002320
Sujith797fe5c2009-03-30 15:28:45 +05302321 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002322 "tx", nbufs, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302323 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002324 ath_err(common,
2325 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302326 goto err;
2327 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002328
Sujith797fe5c2009-03-30 15:28:45 +05302329 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002330 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302331 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002332 ath_err(common,
2333 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302334 goto err;
2335 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002336
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002337 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
Vivek Natarajan181fb182011-01-27 14:45:08 +05302338 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002339
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002340 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2341 error = ath_tx_edma_init(sc);
2342 if (error)
2343 goto err;
2344 }
2345
Sujith797fe5c2009-03-30 15:28:45 +05302346err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002347 if (error != 0)
2348 ath_tx_cleanup(sc);
2349
2350 return error;
2351}
2352
Sujith797fe5c2009-03-30 15:28:45 +05302353void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002354{
Sujithb77f4832008-12-07 21:44:03 +05302355 if (sc->beacon.bdma.dd_desc_len != 0)
2356 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002357
Sujithb77f4832008-12-07 21:44:03 +05302358 if (sc->tx.txdma.dd_desc_len != 0)
2359 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002360
2361 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2362 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002363}
2364
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002365void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2366{
Sujithc5170162008-10-29 10:13:59 +05302367 struct ath_atx_tid *tid;
2368 struct ath_atx_ac *ac;
2369 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002370
Sujith8ee5afb2008-12-07 21:43:36 +05302371 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302372 tidno < WME_NUM_TID;
2373 tidno++, tid++) {
2374 tid->an = an;
2375 tid->tidno = tidno;
2376 tid->seq_start = tid->seq_next = 0;
2377 tid->baw_size = WME_MAX_BA;
2378 tid->baw_head = tid->baw_tail = 0;
2379 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302380 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302381 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302382 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302383 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302384 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302385 tid->state &= ~AGGR_ADDBA_COMPLETE;
2386 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302387 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002388
Sujith8ee5afb2008-12-07 21:43:36 +05302389 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302390 acno < WME_NUM_AC; acno++, ac++) {
2391 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002392 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302393 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002394 }
2395}
2396
Sujithb5aa9bf2008-10-29 10:13:31 +05302397void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002398{
Felix Fietkau2b409942010-07-07 19:42:08 +02002399 struct ath_atx_ac *ac;
2400 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002401 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002402 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302403
Felix Fietkau2b409942010-07-07 19:42:08 +02002404 for (tidno = 0, tid = &an->tid[tidno];
2405 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002406
Felix Fietkau2b409942010-07-07 19:42:08 +02002407 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002408 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002409
Felix Fietkau2b409942010-07-07 19:42:08 +02002410 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002411
Felix Fietkau2b409942010-07-07 19:42:08 +02002412 if (tid->sched) {
2413 list_del(&tid->list);
2414 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002415 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002416
2417 if (ac->sched) {
2418 list_del(&ac->list);
2419 tid->ac->sched = false;
2420 }
2421
2422 ath_tid_drain(sc, txq, tid);
2423 tid->state &= ~AGGR_ADDBA_COMPLETE;
2424 tid->state &= ~AGGR_CLEANUP;
2425
2426 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002427 }
2428}