/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_MCS(_rc)	((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2)		/* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)	/* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME		16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/* 0: BPSK */
	{    52,  108 },	/* 1: QPSK 1/2 */
	{    78,  162 },	/* 2: QPSK 3/4 */
	{   104,  216 },	/* 3: 16-QAM 1/2 */
	{   156,  324 },	/* 4: 16-QAM 3/4 */
	{   208,  432 },	/* 5: 64-QAM 2/3 */
	{   234,  486 },	/* 6: 64-QAM 3/4 */
	{   260,  540 },	/* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)	((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nframes, int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

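/*
 * Mark a TID as having pending traffic: put it on its access category's
 * list and put the access category on the hardware queue's schedule list,
 * unless the TID is paused or already scheduled.
 */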
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

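/* Per-frame driver state is carried in the rate_driver_data area of the
 * frame's ieee80211_tx_info. */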
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

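/*
 * Release the given sequence number from the block-ack window and slide
 * the window start past any following slots that have already completed.
 */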
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

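/* Track an outstanding sequence number in the block-ack window, extending
 * the window tail if necessary. */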
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

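/* Allocate a fresh ath_buf that mirrors an existing one, so that a stale
 * (holding) descriptor can be re-queued for a software retry. */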
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

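/* Walk an aggregate chain and count its subframes, using the block-ack
 * bitmap from the tx status to count how many were not acknowledged. */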
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) && retry) {
				if (fi->retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

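/*
 * Derive the maximum aggregate size for this frame's rate series, capped
 * so that the aggregate stays within a 4ms transmit duration.
 */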
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
		    !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

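/*
 * Start an ADDBA session: pause the TID, report the starting sequence
 * number and reset the block-ack window state for this TID.
 */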
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

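/* Only A-MPDU frames that are not rate control probes are counted in the
 * hardware queue's axq_ampdu_depth. */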
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001075static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1076{
1077 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1078 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1079}
1080
Sujith043a0402009-01-16 21:38:47 +05301081/*
1082 * Drain a given TX queue (could be Beacon or Data)
1083 *
1084 * This assumes output has been stopped and
1085 * we do not need to block ath_tx_tasklet.
1086 */
1087void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301088{
1089 struct ath_buf *bf, *lastbf;
1090 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001091 struct ath_tx_status ts;
1092
1093 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301094 INIT_LIST_HEAD(&bf_head);
1095
Sujithe8324352009-01-16 21:38:42 +05301096 for (;;) {
1097 spin_lock_bh(&txq->axq_lock);
1098
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001099 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1100 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1101 txq->txq_headidx = txq->txq_tailidx = 0;
1102 spin_unlock_bh(&txq->axq_lock);
1103 break;
1104 } else {
1105 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1106 struct ath_buf, list);
1107 }
1108 } else {
1109 if (list_empty(&txq->axq_q)) {
1110 txq->axq_link = NULL;
1111 spin_unlock_bh(&txq->axq_lock);
1112 break;
1113 }
1114 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1115 list);
Sujithe8324352009-01-16 21:38:42 +05301116
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001117 if (bf->bf_stale) {
1118 list_del(&bf->list);
1119 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301120
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001121 ath_tx_return_buffer(sc, bf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001122 continue;
1123 }
Sujithe8324352009-01-16 21:38:42 +05301124 }
1125
1126 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05301127
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001128 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1129 list_cut_position(&bf_head,
1130 &txq->txq_fifo[txq->txq_tailidx],
1131 &lastbf->list);
1132 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1133 } else {
1134 /* remove ath_buf's of the same mpdu from txq */
1135 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1136 }
1137
Sujithe8324352009-01-16 21:38:42 +05301138 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001139 if (bf_is_ampdu_not_probing(bf))
1140 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301141 spin_unlock_bh(&txq->axq_lock);
1142
1143 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001144 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1145 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301146 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001147 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +05301148 }
1149
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001150 spin_lock_bh(&txq->axq_lock);
1151 txq->axq_tx_inprogress = false;
1152 spin_unlock_bh(&txq->axq_lock);
1153
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001154 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1155 spin_lock_bh(&txq->axq_lock);
1156 while (!list_empty(&txq->txq_fifo_pending)) {
1157 bf = list_first_entry(&txq->txq_fifo_pending,
1158 struct ath_buf, list);
1159 list_cut_position(&bf_head,
1160 &txq->txq_fifo_pending,
1161 &bf->bf_lastbf->list);
1162 spin_unlock_bh(&txq->axq_lock);
1163
1164 if (bf_isampdu(bf))
1165 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
Felix Fietkauc5992612010-11-14 15:20:09 +01001166 &ts, 0, retry_tx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001167 else
1168 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1169 &ts, 0, 0);
1170 spin_lock_bh(&txq->axq_lock);
1171 }
1172 spin_unlock_bh(&txq->axq_lock);
1173 }
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001174
1175 /* flush any pending frames if aggregation is enabled */
1176 if (sc->sc_flags & SC_OP_TXAGGR) {
1177 if (!retry_tx) {
1178 spin_lock_bh(&txq->axq_lock);
1179 ath_txq_drain_pending_buffers(sc, txq);
1180 spin_unlock_bh(&txq->axq_lock);
1181 }
1182 }
Sujithe8324352009-01-16 21:38:42 +05301183}
1184
Felix Fietkau080e1a22010-12-05 20:17:53 +01001185bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301186{
Sujithcbe61d82009-02-09 13:27:12 +05301187 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001188 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301189 struct ath_txq *txq;
1190 int i, npend = 0;
1191
1192 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001193 return true;
Sujith043a0402009-01-16 21:38:47 +05301194
1195 /* Stop beacon queue */
1196 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1197
1198 /* Stop data queues */
1199 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1200 if (ATH_TXQ_SETUP(sc, i)) {
1201 txq = &sc->tx.txq[i];
1202 ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1203 npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
1204 }
1205 }
1206
Felix Fietkau080e1a22010-12-05 20:17:53 +01001207 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001208 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301209
1210 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1211 if (ATH_TXQ_SETUP(sc, i))
1212 ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
1213 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001214
1215 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301216}
1217
Sujithe8324352009-01-16 21:38:42 +05301218void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1219{
1220 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1221 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1222}
1223
Sujithe8324352009-01-16 21:38:42 +05301224void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1225{
1226 struct ath_atx_ac *ac;
1227 struct ath_atx_tid *tid;
1228
1229 if (list_empty(&txq->axq_acq))
1230 return;
1231
1232 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1233 list_del(&ac->list);
1234 ac->sched = false;
1235
1236 do {
1237 if (list_empty(&ac->tid_q))
1238 return;
1239
1240 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
1241 list_del(&tid->list);
1242 tid->sched = false;
1243
1244 if (tid->paused)
1245 continue;
1246
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001247 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301248
1249 /*
1250 * add tid to round-robin queue if more frames
1251 * are pending for the tid
1252 */
1253 if (!list_empty(&tid->buf_q))
1254 ath_tx_queue_tid(txq, tid);
1255
1256 break;
1257 } while (!list_empty(&ac->tid_q));
1258
1259 if (!list_empty(&ac->tid_q)) {
1260 if (!ac->sched) {
1261 ac->sched = true;
1262 list_add_tail(&ac->list, &txq->axq_acq);
1263 }
1264 }
1265}
1266
Sujithe8324352009-01-16 21:38:42 +05301267/***********/
1268/* TX, DMA */
1269/***********/
1270
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001271/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001272 * Insert a chain of ath_buf (descriptors) on a txq and
1273 * assume the descriptors are already chained together by caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001274 */
Sujith102e0572008-10-29 10:15:16 +05301275static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1276 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001277{
Sujithcbe61d82009-02-09 13:27:12 +05301278 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001279 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001280 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +05301281
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001282 /*
1283 * Insert the frame on the outbound list and
1284 * pass it on to the hardware.
1285 */
1286
1287 if (list_empty(head))
1288 return;
1289
1290 bf = list_first_entry(head, struct ath_buf, list);
1291
Joe Perches226afe62010-12-02 19:12:37 -08001292 ath_dbg(common, ATH_DBG_QUEUE,
1293 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001294
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001295 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1296 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1297 list_splice_tail_init(head, &txq->txq_fifo_pending);
1298 return;
1299 }
1300 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
Joe Perches226afe62010-12-02 19:12:37 -08001301 ath_dbg(common, ATH_DBG_XMIT,
1302 "Initializing tx fifo %d which is non-empty\n",
1303 txq->txq_headidx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001304 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1305 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1306 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001307 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001308 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1309 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001310 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001311 list_splice_tail_init(head, &txq->axq_q);
1312
1313 if (txq->axq_link == NULL) {
1314 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001315 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1316 txq->axq_qnum, ito64(bf->bf_daddr),
1317 bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001318 } else {
1319 *txq->axq_link = bf->bf_daddr;
Joe Perches226afe62010-12-02 19:12:37 -08001320 ath_dbg(common, ATH_DBG_XMIT,
1321 "link[%u] (%p)=%llx (%p)\n",
1322 txq->axq_qnum, txq->axq_link,
1323 ito64(bf->bf_daddr), bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001324 }
1325 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1326 &txq->axq_link);
1327 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001328 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001329 txq->axq_depth++;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001330 if (bf_is_ampdu_not_probing(bf))
1331 txq->axq_ampdu_depth++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001332}
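/*
 * Rough sketch of the two submit paths above. On EDMA hardware
 * (ATH9K_HW_CAP_EDMA) the queue is a ring of ATH_TXFIFO_DEPTH FIFO
 * slots indexed by txq_headidx:
 *
 *	if (txq->axq_depth >= ATH_TXFIFO_DEPTH)
 *		list_splice_tail_init(head, &txq->txq_fifo_pending);
 *	else
 *		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
 *
 * The pending list is pushed later from ath_tx_edma_tasklet() once a
 * FIFO slot drains. Legacy hardware instead links the new chain into
 * the DMA list via *txq->axq_link (or ath9k_hw_puttxbuf() when the
 * queue is empty) and kicks it with ath9k_hw_txstart().
 */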
1333
Sujithe8324352009-01-16 21:38:42 +05301334static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001335 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301336{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001337 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001338 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301339
Sujithe8324352009-01-16 21:38:42 +05301340 bf->bf_state.bf_type |= BUF_AMPDU;
Sujithfec247c2009-07-27 12:08:16 +05301341 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
Sujithe8324352009-01-16 21:38:42 +05301342
1343 /*
1344 * Do not queue to h/w when any of the following conditions is true:
1345 * - there are pending frames in software queue
1346 * - the TID is currently paused for ADDBA/BAR request
1347 * - seqno is not within block-ack window
1348 * - h/w queue depth exceeds low water mark
1349 */
1350 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001351 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001352 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001353 /*
Sujithe8324352009-01-16 21:38:42 +05301354 * Add this frame to software queue for scheduling later
1355 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001356 */
Felix Fietkau04caf862010-11-14 15:20:12 +01001357 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301358 ath_tx_queue_tid(txctl->txq, tid);
1359 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001360 }
1361
Felix Fietkau04caf862010-11-14 15:20:12 +01001362 INIT_LIST_HEAD(&bf_head);
1363 list_add(&bf->list, &bf_head);
1364
Sujithe8324352009-01-16 21:38:42 +05301365 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001366 if (!fi->retries)
1367 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301368
1369 /* Queue to h/w without aggregation */
Sujithd43f30152009-01-16 21:38:53 +05301370 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001371 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau04caf862010-11-14 15:20:12 +01001372 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
Sujithc4288392008-11-18 09:09:30 +05301373}
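/*
 * Worked example of the block-ack window test above, assuming the
 * usual modulo-4096 BAW_WITHIN() from ath9k.h, i.e.
 * (((seqno - seq_start) & 4095) < baw_size):
 *
 *	seq_start = 4090, baw_size = 64, seqno = 10
 *	(10 - 4090) & 4095 = 16, and 16 < 64
 *
 * so the frame is still inside the window even though the sequence
 * space has wrapped, and may be queued to hardware. A frame outside
 * the window stays on tid->buf_q until the window slides forward.
 */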
1374
Felix Fietkau82b873a2010-11-11 03:18:37 +01001375static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1376 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001377 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001378{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001379 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301380 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001381
Sujithe8324352009-01-16 21:38:42 +05301382 bf = list_first_entry(bf_head, struct ath_buf, list);
1383 bf->bf_state.bf_type &= ~BUF_AMPDU;
1384
1385 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001386 if (tid)
1387 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301388
Sujithd43f30152009-01-16 21:38:53 +05301389 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001390 fi = get_frame_info(bf->bf_mpdu);
1391 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301392 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301393 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001394}
1395
Sujith528f0c62008-10-29 10:14:26 +05301396static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001397{
Sujith528f0c62008-10-29 10:14:26 +05301398 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001399 enum ath9k_pkt_type htype;
1400 __le16 fc;
1401
Sujith528f0c62008-10-29 10:14:26 +05301402 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001403 fc = hdr->frame_control;
1404
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001405 if (ieee80211_is_beacon(fc))
1406 htype = ATH9K_PKT_TYPE_BEACON;
1407 else if (ieee80211_is_probe_resp(fc))
1408 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1409 else if (ieee80211_is_atim(fc))
1410 htype = ATH9K_PKT_TYPE_ATIM;
1411 else if (ieee80211_is_pspoll(fc))
1412 htype = ATH9K_PKT_TYPE_PSPOLL;
1413 else
1414 htype = ATH9K_PKT_TYPE_NORMAL;
1415
1416 return htype;
1417}
1418
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001419static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1420 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301421{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001422 struct ath_wiphy *aphy = hw->priv;
1423 struct ath_softc *sc = aphy->sc;
Sujith528f0c62008-10-29 10:14:26 +05301424 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001425 struct ieee80211_sta *sta = tx_info->control.sta;
1426 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301427 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001428 struct ath_frame_info *fi = get_frame_info(skb);
Sujith528f0c62008-10-29 10:14:26 +05301429 struct ath_node *an;
1430 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001431 enum ath9k_key_type keytype;
1432 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001433 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301434
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001435 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301436
Sujith528f0c62008-10-29 10:14:26 +05301437 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001438 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1439 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001440
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001441 an = (struct ath_node *) sta->drv_priv;
1442 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1443
1444 /*
1445 * Override seqno set by upper layer with the one
1446 * in tx aggregation state.
1447 */
1448 tid = ATH_AN_2_TID(an, tidno);
1449 seqno = tid->seq_next;
1450 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1451 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1452 }
1453
1454 memset(fi, 0, sizeof(*fi));
1455 if (hw_key)
1456 fi->keyix = hw_key->hw_key_idx;
1457 else
1458 fi->keyix = ATH9K_TXKEYIX_INVALID;
1459 fi->keytype = keytype;
1460 fi->framelen = framelen;
1461 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301462}
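/*
 * Worked example of the sequence number override above: if the TID's
 * seq_next is currently 2047, the outgoing QoS data frame gets
 *
 *	hdr->seq_ctrl = cpu_to_le16(2047 << IEEE80211_SEQ_SEQ_SHIFT);	(0x7ff0)
 *
 * and seq_next then advances modulo IEEE80211_SEQ_MAX via INCR(), so
 * the driver's per-TID counter, not mac80211's, defines the ordering
 * used by the block-ack window.
 */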
1463
Felix Fietkau82b873a2010-11-11 03:18:37 +01001464static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301465{
1466 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1467 int flags = 0;
1468
1469 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1470 flags |= ATH9K_TXDESC_INTREQ;
1471
1472 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1473 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301474
Felix Fietkau82b873a2010-11-11 03:18:37 +01001475 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001476 flags |= ATH9K_TXDESC_LDPC;
1477
Sujith528f0c62008-10-29 10:14:26 +05301478 return flags;
1479}
1480
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001481/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001482 * rix - rate index
1483 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1484 * width - 0 for 20 MHz, 1 for 40 MHz
1485 * half_gi - use the 3.6 us (short GI) symbol time instead of 4 us
1486 */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001487static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301488 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001489{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001490 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001491 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301492
1493 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001494 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001495 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001496 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001497 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1498
1499 if (!half_gi)
1500 duration = SYMBOL_TIME(nsymbols);
1501 else
1502 duration = SYMBOL_TIME_HALFGI(nsymbols);
1503
Sujithe63835b2008-11-18 09:07:53 +05301504 /* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001505 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301506
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001507 return duration;
1508}
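/*
 * Worked example (single stream, 20 MHz, long GI): for rix 7 and a
 * 1500-byte MPDU,
 *
 *	nbits    = 1500 * 8 + OFDM_PLCP_BITS  = 12022
 *	nsymbits = bits_per_symbol[7][0] * 1  = 260
 *	nsymbols = (12022 + 260 - 1) / 260    = 47
 *	duration = SYMBOL_TIME(47) + 36       = 224 us
 *
 * where 36 us is L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1).
 */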
1509
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301510u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1511{
1512 struct ath_hw *ah = sc->sc_ah;
1513 struct ath9k_channel *curchan = ah->curchan;
1514 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1515 (curchan->channelFlags & CHANNEL_5GHZ) &&
1516 (chainmask == 0x7) && (rate < 0x90))
1517 return 0x3;
1518 else
1519 return chainmask;
1520}
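/*
 * Example of the reduction above: with APM enabled (SC_OP_ENABLE_APM)
 * on a 5 GHz channel, a three-chain transmit mask is cut down for low
 * rates (hw rate code below 0x90):
 *
 *	ath_txchainmask_reduction(sc, 0x7, series[i].Rate);	returns 0x3
 *
 * Higher rates, 2.4 GHz channels and non-0x7 chainmasks are passed
 * through unchanged.
 */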
1521
Felix Fietkau269c44b2010-11-14 15:20:06 +01001522static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001523{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001524 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001525 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301526 struct sk_buff *skb;
1527 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301528 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001529 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301530 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301531 int i, flags = 0;
1532 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301533 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301534
1535 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301536
Sujitha22be222009-03-30 15:28:36 +05301537 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301538 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301539 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301540 hdr = (struct ieee80211_hdr *)skb->data;
1541 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301542
Sujithc89424d2009-01-30 14:29:28 +05301543 /*
1544 * We check if Short Preamble is needed for the CTS rate by
1545 * checking the BSS's global flag.
1546 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1547 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001548 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1549 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301550 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001551 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001552
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001553 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001554 bool is_40, is_sgi, is_sp;
1555 int phy;
1556
Sujithe63835b2008-11-18 09:07:53 +05301557 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001558 continue;
1559
Sujitha8efee42008-11-18 09:07:30 +05301560 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301561 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001562
Felix Fietkau27032052010-01-17 21:08:50 +01001563 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1564 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301565 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001566 flags |= ATH9K_TXDESC_RTSENA;
1567 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1568 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1569 flags |= ATH9K_TXDESC_CTSENA;
1570 }
1571
Sujithc89424d2009-01-30 14:29:28 +05301572 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1573 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1574 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1575 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001576
Felix Fietkau545750d2009-11-23 22:21:01 +01001577 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1578 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1579 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1580
1581 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1582 /* MCS rates */
1583 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301584 series[i].ChSel = ath_txchainmask_reduction(sc,
1585 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001586 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001587 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001588 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1589 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001590 continue;
1591 }
1592
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301593 /* legacy rates */
1594 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
Felix Fietkau545750d2009-11-23 22:21:01 +01001595 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1596 !(rate->flags & IEEE80211_RATE_ERP_G))
1597 phy = WLAN_RC_PHY_CCK;
1598 else
1599 phy = WLAN_RC_PHY_OFDM;
1600
1601 series[i].Rate = rate->hw_value;
1602 if (rate->hw_value_short) {
1603 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1604 series[i].Rate |= rate->hw_value_short;
1605 } else {
1606 is_sp = false;
1607 }
1608
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301609 if (bf->bf_state.bfs_paprd)
1610 series[i].ChSel = common->tx_chainmask;
1611 else
1612 series[i].ChSel = ath_txchainmask_reduction(sc,
1613 common->tx_chainmask, series[i].Rate);
1614
Felix Fietkau545750d2009-11-23 22:21:01 +01001615 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001616 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001617 }
1618
Felix Fietkau27032052010-01-17 21:08:50 +01001619 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001620 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001621 flags &= ~ATH9K_TXDESC_RTSENA;
1622
1623 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1624 if (flags & ATH9K_TXDESC_RTSENA)
1625 flags &= ~ATH9K_TXDESC_CTSENA;
1626
Sujithe63835b2008-11-18 09:07:53 +05301627 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301628 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1629 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301630 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301631 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301632
Sujith17d79042009-02-09 13:27:03 +05301633 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301634 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001635}
1636
Felix Fietkau82b873a2010-11-11 03:18:37 +01001637static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001638 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001639 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301640{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001641 struct ath_wiphy *aphy = hw->priv;
1642 struct ath_softc *sc = aphy->sc;
Felix Fietkau04caf862010-11-14 15:20:12 +01001643 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001644 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001645 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001646 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001647 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001648 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001649
1650 bf = ath_tx_get_buffer(sc);
1651 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001652 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001653 return NULL;
1654 }
Sujithe8324352009-01-16 21:38:42 +05301655
Sujithe8324352009-01-16 21:38:42 +05301656 ATH_TXBUF_RESET(bf);
1657
Felix Fietkau827e69b2009-11-15 23:09:25 +01001658 bf->aphy = aphy;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001659 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301660 bf->bf_mpdu = skb;
1661
Ben Greearc1739eb32010-10-14 12:45:29 -07001662 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1663 skb->len, DMA_TO_DEVICE);
1664 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301665 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001666 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001667 ath_err(ath9k_hw_common(sc->sc_ah),
1668 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001669 ath_tx_return_buffer(sc, bf);
1670 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301671 }
1672
Sujithe8324352009-01-16 21:38:42 +05301673 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301674
1675 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001676 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301677
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001678 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1679 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301680
1681 ath9k_hw_filltxdesc(ah, ds,
1682 skb->len, /* segment length */
1683 true, /* first segment */
1684 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001685 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001686 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001687 txq->axq_qnum);
1688
1689
1690 return bf;
1691}
1692
1693/* FIXME: tx power */
1694static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1695 struct ath_tx_control *txctl)
1696{
1697 struct sk_buff *skb = bf->bf_mpdu;
1698 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1699 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001700 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001701 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001702 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301703
Sujithe8324352009-01-16 21:38:42 +05301704 spin_lock_bh(&txctl->txq->axq_lock);
1705
Felix Fietkau248a38d2010-12-10 21:16:46 +01001706 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001707 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1708 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001709 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001710
Felix Fietkau066dae92010-11-07 14:59:39 +01001711 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001712 }
1713
1714 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001715 /*
1716 * Try aggregation if it's a unicast data frame
1717 * and the destination is HT capable.
1718 */
1719 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301720 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001721 INIT_LIST_HEAD(&bf_head);
1722 list_add_tail(&bf->list, &bf_head);
1723
Felix Fietkau61117f02010-11-11 03:18:36 +01001724 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001725 bf->bf_state.bfs_paprd = txctl->paprd;
1726
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001727 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001728 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1729 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001730
Felix Fietkau248a38d2010-12-10 21:16:46 +01001731 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301732 }
1733
1734 spin_unlock_bh(&txctl->txq->axq_lock);
1735}
1736
1737/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001738int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301739 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001740{
Felix Fietkau28d16702010-11-14 15:20:10 +01001741 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1742 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001743 struct ieee80211_sta *sta = info->control.sta;
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001744 struct ath_wiphy *aphy = hw->priv;
1745 struct ath_softc *sc = aphy->sc;
Felix Fietkau84642d62010-06-01 21:33:13 +02001746 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001747 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001748 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001749 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001750 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001751
Ben Greeara9927ba2010-12-06 21:13:49 -08001752 /* NOTE: sta can be NULL according to net/mac80211.h */
1753 if (sta)
1754 txctl->an = (struct ath_node *)sta->drv_priv;
1755
Felix Fietkau04caf862010-11-14 15:20:12 +01001756 if (info->control.hw_key)
1757 frmlen += info->control.hw_key->icv_len;
1758
Felix Fietkau28d16702010-11-14 15:20:10 +01001759 /*
1760 * As a temporary workaround, assign seq# here; this will likely need
1761 * to be cleaned up to work better with Beacon transmission and virtual
1762 * BSSes.
1763 */
1764 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1765 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1766 sc->tx.seq_no += 0x10;
1767 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1768 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1769 }
1770
1771 /* Add the padding after the header if this is not already done */
1772 padpos = ath9k_cmn_padpos(hdr->frame_control);
1773 padsize = padpos & 3;
1774 if (padsize && skb->len > padpos) {
1775 if (skb_headroom(skb) < padsize)
1776 return -ENOMEM;
1777
1778 skb_push(skb, padsize);
1779 memmove(skb->data, skb->data + padsize, padpos);
1780 }
1781
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001782 setup_frame_info(hw, skb, frmlen);
1783
1784 /*
1785 * At this point, the vif, hw_key and sta pointers in the tx control
1786 * info are no longer valid (overwritten by the ath_frame_info data).
1787 */
1788
1789 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001790 if (unlikely(!bf))
1791 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001792
Felix Fietkau066dae92010-11-07 14:59:39 +01001793 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001794 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001795 if (txq == sc->tx.txq_map[q] &&
1796 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1797 ath_mac80211_stop_queue(sc, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001798 txq->stopped = 1;
1799 }
1800 spin_unlock_bh(&txq->axq_lock);
1801
Sujithe8324352009-01-16 21:38:42 +05301802 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001803
1804 return 0;
1805}
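/*
 * Minimal caller sketch (illustrative only, values assumed): the usual
 * mac80211 tx path fills an ath_tx_control and hands the skb over,
 * keeping ownership of the skb if this function fails:
 *
 *	struct ath_tx_control txctl = {
 *		.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)],
 *	};
 *
 *	if (ath_tx_start(hw, skb, &txctl))
 *		dev_kfree_skb_any(skb);		(caller still owns the skb on error)
 *
 * On success the frame is either handed to the hardware immediately or
 * parked on a TID's software queue for later aggregation.
 */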
1806
Sujithe8324352009-01-16 21:38:42 +05301807/*****************/
1808/* TX Completion */
1809/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001810
Sujithe8324352009-01-16 21:38:42 +05301811static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau61117f02010-11-11 03:18:36 +01001812 struct ath_wiphy *aphy, int tx_flags, int ftype,
Felix Fietkau066dae92010-11-07 14:59:39 +01001813 struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001814{
Sujithe8324352009-01-16 21:38:42 +05301815 struct ieee80211_hw *hw = sc->hw;
1816 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001817 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001818 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001819 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301820
Joe Perches226afe62010-12-02 19:12:37 -08001821 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301822
Felix Fietkau827e69b2009-11-15 23:09:25 +01001823 if (aphy)
1824 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301825
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301826 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301827 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301828
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301829 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301830 /* Frame was ACKed */
1831 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1832 }
1833
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001834 padpos = ath9k_cmn_padpos(hdr->frame_control);
1835 padsize = padpos & 3;
1836 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301837 /*
1838 * Remove MAC header padding before giving the frame back to
1839 * mac80211.
1840 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001841 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301842 skb_pull(skb, padsize);
1843 }
1844
Sujith1b04b932010-01-08 10:36:05 +05301845 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1846 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001847 ath_dbg(common, ATH_DBG_PS,
1848 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301849 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1850 PS_WAIT_FOR_CAB |
1851 PS_WAIT_FOR_PSPOLL_DATA |
1852 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001853 }
1854
Felix Fietkau61117f02010-11-11 03:18:36 +01001855 if (unlikely(ftype))
1856 ath9k_tx_status(hw, skb, ftype);
Felix Fietkau97923b12010-06-12 00:33:55 -04001857 else {
1858 q = skb_get_queue_mapping(skb);
Felix Fietkau066dae92010-11-07 14:59:39 +01001859 if (txq == sc->tx.txq_map[q]) {
1860 spin_lock_bh(&txq->axq_lock);
1861 if (WARN_ON(--txq->pending_frames < 0))
1862 txq->pending_frames = 0;
1863 spin_unlock_bh(&txq->axq_lock);
1864 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001865
Felix Fietkau827e69b2009-11-15 23:09:25 +01001866 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001867 }
Sujithe8324352009-01-16 21:38:42 +05301868}
1869
1870static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001871 struct ath_txq *txq, struct list_head *bf_q,
1872 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301873{
1874 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301875 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301876 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301877
Sujithe8324352009-01-16 21:38:42 +05301878 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301879 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301880
1881 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301882 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301883
1884 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301885 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301886 }
1887
Ben Greearc1739eb32010-10-14 12:45:29 -07001888 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001889 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001890
1891 if (bf->bf_state.bfs_paprd) {
Felix Fietkau82259b72010-11-14 15:20:04 +01001892 if (!sc->paprd_pending)
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001893 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001894 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001895 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001896 } else {
Felix Fietkau066dae92010-11-07 14:59:39 +01001897 ath_debug_stat_tx(sc, bf, ts);
Felix Fietkau61117f02010-11-11 03:18:36 +01001898 ath_tx_complete(sc, skb, bf->aphy, tx_flags,
1899 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001900 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001901 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
1902 * accidentally reference it later.
1903 */
1904 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301905
1906 /*
1907 * Return the list of ath_buf of this mpdu to the free queue
1908 */
1909 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1910 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1911 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1912}
1913
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001914static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Felix Fietkaub572d032010-11-14 15:20:07 +01001915 int nframes, int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301916{
Sujitha22be222009-03-30 15:28:36 +05301917 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301918 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301919 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001920 struct ieee80211_hw *hw = bf->aphy->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001921 struct ath_softc *sc = bf->aphy->sc;
1922 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301923 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301924
Sujith95e4acb2009-03-13 08:56:09 +05301925 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001926 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301927
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001928 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301929 WARN_ON(tx_rateindex >= hw->max_rates);
1930
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001931 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301932 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001933 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001934 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301935
Felix Fietkaub572d032010-11-14 15:20:07 +01001936 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02001937
Felix Fietkaub572d032010-11-14 15:20:07 +01001938 tx_info->status.ampdu_len = nframes;
1939 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02001940 }
1941
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001942 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301943 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001944 /*
1945 * If an underrun error is seen assume it as an excessive
1946 * retry only if max frame trigger level has been reached
1947 * (2 KB for single stream, and 4 KB for dual stream).
1948 * Adjust the long retry as if the frame was tried
1949 * hw->max_rate_tries times to affect how rate control updates
1950 * PER for the failed rate.
1951 * In case of congestion on the bus, penalizing these
1952 * underruns should help the hardware actually transmit new frames
1953 * successfully by eventually preferring slower rates.
1954 * This itself should also alleviate congestion on the bus.
1955 */
1956 if (ieee80211_is_data(hdr->frame_control) &&
1957 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1958 ATH9K_TX_DELIM_UNDERRUN)) &&
1959 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1960 tx_info->status.rates[tx_rateindex].count =
1961 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05301962 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301963
Felix Fietkau545750d2009-11-23 22:21:01 +01001964 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301965 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01001966 tx_info->status.rates[i].idx = -1;
1967 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301968
Felix Fietkau78c46532010-06-25 01:26:16 +02001969 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05301970}
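/*
 * Example of the feedback above for an aggregate: with nframes = 16
 * subframes of which nbad = 3 went unacked, mac80211 sees
 *
 *	tx_info->status.ampdu_len     = 16;
 *	tx_info->status.ampdu_ack_len = 13;
 *
 * which the rate control algorithm (e.g. minstrel_ht) uses to estimate
 * the PER of the attempted rate.
 */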
1971
Felix Fietkau066dae92010-11-07 14:59:39 +01001972static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
Sujith059d8062009-01-16 21:38:49 +05301973{
Felix Fietkau066dae92010-11-07 14:59:39 +01001974 struct ath_txq *txq;
Sujith059d8062009-01-16 21:38:49 +05301975
Felix Fietkau066dae92010-11-07 14:59:39 +01001976 txq = sc->tx.txq_map[qnum];
Sujith059d8062009-01-16 21:38:49 +05301977 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001978 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07001979 if (ath_mac80211_start_queue(sc, qnum))
1980 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05301981 }
1982 spin_unlock_bh(&txq->axq_lock);
1983}
1984
Sujithc4288392008-11-18 09:09:30 +05301985static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001986{
Sujithcbe61d82009-02-09 13:27:12 +05301987 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001988 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001989 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1990 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05301991 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07001992 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05301993 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001994 int status;
Felix Fietkau066dae92010-11-07 14:59:39 +01001995 int qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001996
Joe Perches226afe62010-12-02 19:12:37 -08001997 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
1998 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
1999 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002000
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002001 for (;;) {
2002 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002003 if (list_empty(&txq->axq_q)) {
2004 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002005 spin_unlock_bh(&txq->axq_lock);
2006 break;
2007 }
2008 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2009
2010 /*
2011 * There is a race condition in which a BH gets scheduled
2012 * after sw writes TxE and before the hw re-loads the last
2013 * descriptor to get the newly chained one.
2014 * Software must keep the last DONE descriptor as a
2015 * holding descriptor - software does so by marking
2016 * it with the STALE flag.
2017 */
2018 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302019 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002020 bf_held = bf;
2021 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302022 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002023 break;
2024 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002025 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302026 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002027 }
2028 }
2029
2030 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302031 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002032
Felix Fietkau29bffa92010-03-29 20:14:23 -07002033 memset(&ts, 0, sizeof(ts));
2034 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002035 if (status == -EINPROGRESS) {
2036 spin_unlock_bh(&txq->axq_lock);
2037 break;
2038 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002039
2040 /*
2041 * Remove the ath_buf's of the same transmit unit from txq,
2042 * but leave the last descriptor behind as the holding
2043 * descriptor for hw.
2044 */
Sujitha119cc42009-03-30 15:28:38 +05302045 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002046 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002047 if (!list_is_singular(&lastbf->list))
2048 list_cut_position(&bf_head,
2049 &txq->axq_q, lastbf->list.prev);
2050
2051 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002052 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002053 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002054 if (bf_held)
2055 list_del(&bf_held->list);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002056
2057 if (bf_is_ampdu_not_probing(bf))
2058 txq->axq_ampdu_depth--;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002059 spin_unlock_bh(&txq->axq_lock);
2060
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002061 if (bf_held)
2062 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002063
Sujithcd3d39a2008-08-11 14:03:34 +05302064 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002065 /*
2066 * This frame is sent out as a single frame.
2067 * Use hardware retry status for this frame.
2068 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002069 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302070 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaub572d032010-11-14 15:20:07 +01002071 ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002072 }
Johannes Berge6a98542008-10-21 12:40:02 +02002073
Felix Fietkau066dae92010-11-07 14:59:39 +01002074 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2075
Sujithcd3d39a2008-08-11 14:03:34 +05302076 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002077 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2078 true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002079 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002080 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002081
Felix Fietkau066dae92010-11-07 14:59:39 +01002082 if (txq == sc->tx.txq_map[qnum])
2083 ath_wake_mac80211_queue(sc, qnum);
Sujith059d8062009-01-16 21:38:49 +05302084
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002085 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302086 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002087 ath_txq_schedule(sc, txq);
2088 spin_unlock_bh(&txq->axq_lock);
2089 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002090}
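/*
 * Holding-descriptor walkthrough for the loop above (a sketch of the
 * single-frame case): suppose axq_q holds [H, A, B] where H is the
 * stale buffer left behind by the previous pass and A is a completed
 * single frame (A->bf_lastbf == A). One iteration then
 *
 *	1. sees H marked bf_stale, remembers it as bf_held and moves to A,
 *	2. reads A's status via ath9k_hw_txprocdesc(),
 *	3. marks A stale and leaves it on axq_q as the new holding
 *	   descriptor, returns H to the free list, and completes A's skb
 *	   through ath_tx_complete_buf().
 *
 * A itself is reclaimed on the next pass, so one completed descriptor
 * always stays chained in front of the hardware, which is what closes
 * the TxE re-load race described at the top of the loop.
 */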
2091
Sujith305fe472009-07-23 15:32:29 +05302092static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002093{
2094 struct ath_softc *sc = container_of(work, struct ath_softc,
2095 tx_complete_work.work);
2096 struct ath_txq *txq;
2097 int i;
2098 bool needreset = false;
2099
2100 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2101 if (ATH_TXQ_SETUP(sc, i)) {
2102 txq = &sc->tx.txq[i];
2103 spin_lock_bh(&txq->axq_lock);
2104 if (txq->axq_depth) {
2105 if (txq->axq_tx_inprogress) {
2106 needreset = true;
2107 spin_unlock_bh(&txq->axq_lock);
2108 break;
2109 } else {
2110 txq->axq_tx_inprogress = true;
2111 }
2112 }
2113 spin_unlock_bh(&txq->axq_lock);
2114 }
2115
2116 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002117 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2118 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302119 ath9k_ps_wakeup(sc);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002120 ath_reset(sc, true);
Sujith332c5562009-10-09 09:51:28 +05302121 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002122 }
2123
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002124 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002125 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2126}
2127
2128
Sujithe8324352009-01-16 21:38:42 +05302129
2130void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002131{
Sujithe8324352009-01-16 21:38:42 +05302132 int i;
2133 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002134
Sujithe8324352009-01-16 21:38:42 +05302135 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002136
2137 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302138 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2139 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002140 }
2141}
2142
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002143void ath_tx_edma_tasklet(struct ath_softc *sc)
2144{
2145 struct ath_tx_status txs;
2146 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2147 struct ath_hw *ah = sc->sc_ah;
2148 struct ath_txq *txq;
2149 struct ath_buf *bf, *lastbf;
2150 struct list_head bf_head;
2151 int status;
2152 int txok;
Felix Fietkau066dae92010-11-07 14:59:39 +01002153 int qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002154
2155 for (;;) {
2156 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2157 if (status == -EINPROGRESS)
2158 break;
2159 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002160 ath_dbg(common, ATH_DBG_XMIT,
2161 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002162 break;
2163 }
2164
2165 /* Skip beacon completions */
2166 if (txs.qid == sc->beacon.beaconq)
2167 continue;
2168
2169 txq = &sc->tx.txq[txs.qid];
2170
2171 spin_lock_bh(&txq->axq_lock);
2172 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2173 spin_unlock_bh(&txq->axq_lock);
2174 return;
2175 }
2176
2177 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2178 struct ath_buf, list);
2179 lastbf = bf->bf_lastbf;
2180
2181 INIT_LIST_HEAD(&bf_head);
2182 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2183 &lastbf->list);
2184 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2185 txq->axq_depth--;
2186 txq->axq_tx_inprogress = false;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002187 if (bf_is_ampdu_not_probing(bf))
2188 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002189 spin_unlock_bh(&txq->axq_lock);
2190
2191 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2192
2193 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002194 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2195 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaub572d032010-11-14 15:20:07 +01002196 ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002197 }
2198
Felix Fietkau066dae92010-11-07 14:59:39 +01002199 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2200
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002201 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002202 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2203 txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002204 else
2205 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2206 &txs, txok, 0);
2207
Felix Fietkau066dae92010-11-07 14:59:39 +01002208 if (txq == sc->tx.txq_map[qnum])
2209 ath_wake_mac80211_queue(sc, qnum);
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002210
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002211 spin_lock_bh(&txq->axq_lock);
2212 if (!list_empty(&txq->txq_fifo_pending)) {
2213 INIT_LIST_HEAD(&bf_head);
2214 bf = list_first_entry(&txq->txq_fifo_pending,
2215 struct ath_buf, list);
2216 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2217 &bf->bf_lastbf->list);
2218 ath_tx_txqaddbuf(sc, txq, &bf_head);
2219 } else if (sc->sc_flags & SC_OP_TXAGGR)
2220 ath_txq_schedule(sc, txq);
2221 spin_unlock_bh(&txq->axq_lock);
2222 }
2223}
2224
Sujithe8324352009-01-16 21:38:42 +05302225/*****************/
2226/* Init, Cleanup */
2227/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002228
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002229static int ath_txstatus_setup(struct ath_softc *sc, int size)
2230{
2231 struct ath_descdma *dd = &sc->txsdma;
2232 u8 txs_len = sc->sc_ah->caps.txs_len;
2233
2234 dd->dd_desc_len = size * txs_len;
2235 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2236 &dd->dd_desc_paddr, GFP_KERNEL);
2237 if (!dd->dd_desc)
2238 return -ENOMEM;
2239
2240 return 0;
2241}
2242
2243static int ath_tx_edma_init(struct ath_softc *sc)
2244{
2245 int err;
2246
2247 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2248 if (!err)
2249 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2250 sc->txsdma.dd_desc_paddr,
2251 ATH_TXSTATUS_RING_SIZE);
2252
2253 return err;
2254}
2255
2256static void ath_tx_edma_cleanup(struct ath_softc *sc)
2257{
2258 struct ath_descdma *dd = &sc->txsdma;
2259
2260 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2261 dd->dd_desc_paddr);
2262}
2263
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002264int ath_tx_init(struct ath_softc *sc, int nbufs)
2265{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002266 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002267 int error = 0;
2268
Sujith797fe5cb2009-03-30 15:28:45 +05302269 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002270
Sujith797fe5cb2009-03-30 15:28:45 +05302271 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002272 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302273 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002274 ath_err(common,
2275 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302276 goto err;
2277 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002278
Sujith797fe5cb2009-03-30 15:28:45 +05302279 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002280 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302281 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002282 ath_err(common,
2283 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302284 goto err;
2285 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002286
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002287 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2288
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002289 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2290 error = ath_tx_edma_init(sc);
2291 if (error)
2292 goto err;
2293 }
2294
Sujith797fe5cb2009-03-30 15:28:45 +05302295err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002296 if (error != 0)
2297 ath_tx_cleanup(sc);
2298
2299 return error;
2300}
2301
Sujith797fe5cb2009-03-30 15:28:45 +05302302void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002303{
Sujithb77f4832008-12-07 21:44:03 +05302304 if (sc->beacon.bdma.dd_desc_len != 0)
2305 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002306
Sujithb77f4832008-12-07 21:44:03 +05302307 if (sc->tx.txdma.dd_desc_len != 0)
2308 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002309
2310 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2311 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002312}
2313
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002314void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2315{
Sujithc5170162008-10-29 10:13:59 +05302316 struct ath_atx_tid *tid;
2317 struct ath_atx_ac *ac;
2318 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002319
Sujith8ee5afb2008-12-07 21:43:36 +05302320 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302321 tidno < WME_NUM_TID;
2322 tidno++, tid++) {
2323 tid->an = an;
2324 tid->tidno = tidno;
2325 tid->seq_start = tid->seq_next = 0;
2326 tid->baw_size = WME_MAX_BA;
2327 tid->baw_head = tid->baw_tail = 0;
2328 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302329 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302330 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302331 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302332 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302333 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302334 tid->state &= ~AGGR_ADDBA_COMPLETE;
2335 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302336 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002337
Sujith8ee5afb2008-12-07 21:43:36 +05302338 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302339 acno < WME_NUM_AC; acno++, ac++) {
2340 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002341 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302342 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002343 }
2344}
2345
Sujithb5aa9bf2008-10-29 10:13:31 +05302346void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002347{
Felix Fietkau2b409942010-07-07 19:42:08 +02002348 struct ath_atx_ac *ac;
2349 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002350 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002351 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302352
Felix Fietkau2b409942010-07-07 19:42:08 +02002353 for (tidno = 0, tid = &an->tid[tidno];
2354 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002355
Felix Fietkau2b409942010-07-07 19:42:08 +02002356 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002357 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002358
Felix Fietkau2b409942010-07-07 19:42:08 +02002359 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002360
Felix Fietkau2b409942010-07-07 19:42:08 +02002361 if (tid->sched) {
2362 list_del(&tid->list);
2363 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002364 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002365
2366 if (ac->sched) {
2367 list_del(&ac->list);
2368 tid->ac->sched = false;
2369 }
2370
2371 ath_tid_drain(sc, txq, tid);
2372 tid->state &= ~AGGR_ADDBA_COMPLETE;
2373 tid->state &= ~AGGR_CLEANUP;
2374
2375 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376 }
2377}