/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
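/*
 * Note on the symbol-time helpers above: an HT OFDM symbol lasts 4 us with
 * the regular guard interval and 3.6 us with the short ("half") GI, so
 * SYMBOL_TIME() multiplies by 4 while SYMBOL_TIME_HALFGI() scales by 18/5
 * with rounding up. As a rough worked example, 16 symbols correspond to
 * SYMBOL_TIME(16) = 64 us and SYMBOL_TIME_HALFGI(16) = (16 * 18 + 4) / 5
 * = 58 us (57.6 us rounded up by the integer math).
 */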


static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{  26,  54 },	/* 0: BPSK */
	{  52, 108 },	/* 1: QPSK 1/2 */
	{  78, 162 },	/* 2: QPSK 3/4 */
	{ 104, 216 },	/* 3: 16-QAM 1/2 */
	{ 156, 324 },	/* 4: 16-QAM 3/4 */
	{ 208, 432 },	/* 5: 64-QAM 2/3 */
	{ 234, 486 },	/* 6: 64-QAM 3/4 */
	{ 260, 540 },	/* 7: 64-QAM 5/6 */
};
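/*
 * bits_per_symbol[] gives the data bits carried per OFDM symbol for a
 * single spatial stream, indexed by (MCS index % 8) and by channel width
 * (0 = 20 MHz, 1 = 40 MHz). Callers multiply the looked-up value by the
 * stream count from HT_RC_2_STREAMS() to get the per-symbol capacity of
 * multi-stream rates.
 */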

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nframes, int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864, 19300, 25736, 28952, 32172,
		6424,  12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628,  19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296, 21444, 28596, 32172, 35744,
		7140,  14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};
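/*
 * ath_max_4ms_framelen[][] holds, per HT mode (20/40 MHz, regular/short GI)
 * and per MCS index, the largest frame length that still fits in roughly a
 * 4 ms transmit duration; entries are clamped to 65532 because the hardware
 * length fields are 16 bits wide. ath_lookup_rate() below uses this table to
 * bound the size of A-MPDU aggregates.
 */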

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

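/*
 * Per-frame driver state (retry count, sequence number, key index, frame
 * length) is stashed in the rate_driver_data scratch area of the mac80211
 * tx_info; get_frame_info() below returns that view, and the BUILD_BUG_ON
 * guards against ath_frame_info outgrowing the scratch space.
 */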
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

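/*
 * The two helpers below maintain the transmit block-ack window: tx_buf is a
 * bitmap of outstanding subframes indexed relative to seq_start, with
 * baw_head/baw_tail marking the window edges. ath_tx_addto_baw() marks a
 * sequence number as in flight, and ath_tx_update_baw() clears it on
 * completion and slides seq_start forward past contiguously completed
 * entries.
 */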
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

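/*
 * ath_clone_txbuf() below duplicates an ath_buf so a software retry can be
 * issued while the original last descriptor is still held ("stale") by the
 * hardware; the clone shares the mpdu and DMA address but gets its own copy
 * of the descriptor contents.
 */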
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


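/*
 * ath_tx_complete_aggr() handles tx status for an A-MPDU: subframes covered
 * by the reported block-ack bitmap are completed, the rest are either
 * software-retried (re-queued on the TID) or failed once the retry limit is
 * reached or an aggregation teardown is in progress.
 */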
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable the reset in STA mode
			 * for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) && retry) {
				if (fi->retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

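/*
 * ath_lookup_rate() derives the byte limit for an aggregate from the frame's
 * rate series: it takes the smallest "fits in ~4 ms" length across the
 * configured MCS rates, declines to aggregate probe or legacy-rate frames,
 * and caps the result by the peer's advertised maximum A-MPDU size.
 */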
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet altogether.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * The hardware can accept aggregates up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536, since we are constrained by the hardware.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. the first rate) to
	 * determine the required minimum length for a subframe. Take into
	 * account whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
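	/*
	 * Rough worked example (illustrative values only): for an mpdu
	 * density of 8 us at MCS 15, HT40, full GI, nsymbols = 8 >> 2 = 2,
	 * nsymbits = bits_per_symbol[7][1] * 2 streams = 1080, so
	 * minlen = (2 * 1080) / 8 = 270 bytes per subframe before extra
	 * delimiters are needed.
	 */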

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

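/*
 * ath_tx_form_aggr() pulls frames off the TID queue and chains them into a
 * single A-MPDU, stopping when the block-ack window would be overstepped,
 * the rate-derived byte limit or subframe limit is reached, or the queue
 * runs dry; *aggr_len returns the total aggregate length including padding.
 */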
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

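/*
 * ath_tx_aggr_start/stop/resume() are the mac80211 ADDBA hooks: start
 * initializes the TID's block-ack window state and reports the starting
 * sequence number, stop defers teardown until in-flight subframes complete,
 * and resume re-enables queuing once the ADDBA exchange has finished.
 */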
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0, retry_tx);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!list_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}
1278
Sujithe8324352009-01-16 21:38:42 +05301279/***********/
1280/* TX, DMA */
1281/***********/
1282
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001283/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001284 * Insert a chain of ath_buf (descriptors) on a txq and
1285 * assume the descriptors are already chained together by caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001286 */
Sujith102e0572008-10-29 10:15:16 +05301287static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1288 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001289{
Sujithcbe61d82009-02-09 13:27:12 +05301290 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001291 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001292 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +05301293
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001294 /*
1295 * Insert the frame on the outbound list and
1296 * pass it on to the hardware.
1297 */
1298
1299 if (list_empty(head))
1300 return;
1301
1302 bf = list_first_entry(head, struct ath_buf, list);
1303
Joe Perches226afe62010-12-02 19:12:37 -08001304 ath_dbg(common, ATH_DBG_QUEUE,
1305 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001306
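	/*
	 * Two hardware paths below: EDMA chips push the chain into a
	 * per-queue TX FIFO slot (up to ATH_TXFIFO_DEPTH slots, overflow
	 * parked on txq_fifo_pending), while older chips link descriptors
	 * through axq_link and are kicked with ath9k_hw_txstart() when
	 * the queue was previously empty.
	 */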
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001307 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1308 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1309 list_splice_tail_init(head, &txq->txq_fifo_pending);
1310 return;
1311 }
1312 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
Joe Perches226afe62010-12-02 19:12:37 -08001313 ath_dbg(common, ATH_DBG_XMIT,
1314 "Initializing tx fifo %d which is non-empty\n",
1315 txq->txq_headidx);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001316 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1317 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1318 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001319 TX_STAT_INC(txq->axq_qnum, puttxbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001320 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001321 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1322 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001323 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001324 list_splice_tail_init(head, &txq->axq_q);
1325
1326 if (txq->axq_link == NULL) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001327 TX_STAT_INC(txq->axq_qnum, puttxbuf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001328 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001329 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1330 txq->axq_qnum, ito64(bf->bf_daddr),
1331 bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001332 } else {
1333 *txq->axq_link = bf->bf_daddr;
Joe Perches226afe62010-12-02 19:12:37 -08001334 ath_dbg(common, ATH_DBG_XMIT,
1335 "link[%u] (%p)=%llx (%p)\n",
1336 txq->axq_qnum, txq->axq_link,
1337 ito64(bf->bf_daddr), bf->bf_desc);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001338 }
1339 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1340 &txq->axq_link);
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001341 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001342 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001343 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001344 txq->axq_depth++;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001345 if (bf_is_ampdu_not_probing(bf))
1346 txq->axq_ampdu_depth++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001347}
1348
Sujithe8324352009-01-16 21:38:42 +05301349static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau04caf862010-11-14 15:20:12 +01001350 struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301351{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001352 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001353 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301354
Sujithe8324352009-01-16 21:38:42 +05301355 bf->bf_state.bf_type |= BUF_AMPDU;
1356
1357 /*
1358 * Do not queue to h/w when any of the following conditions is true:
1359 * - there are pending frames in software queue
1360 * - the TID is currently paused for ADDBA/BAR request
1361 * - seqno is not within block-ack window
1362 * - h/w queue depth exceeds low water mark
1363 */
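	/*
	 * BAW_WITHIN() checks that the frame's sequence number falls inside
	 * the block-ack window starting at tid->seq_start and spanning
	 * tid->baw_size frames; frames outside the window must wait in the
	 * software queue until the window advances.
	 */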
1364 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001365 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001366 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001367 /*
Sujithe8324352009-01-16 21:38:42 +05301368 * Add this frame to the software queue so it can be
 1369 * scheduled for aggregation later.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001370 */
Ben Greearbda8add2011-01-09 23:11:48 -08001371 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau04caf862010-11-14 15:20:12 +01001372 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301373 ath_tx_queue_tid(txctl->txq, tid);
1374 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001375 }
1376
Felix Fietkau04caf862010-11-14 15:20:12 +01001377 INIT_LIST_HEAD(&bf_head);
1378 list_add(&bf->list, &bf_head);
1379
Sujithe8324352009-01-16 21:38:42 +05301380 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001381 if (!fi->retries)
1382 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301383
1384 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001385 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301386 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001387 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau04caf862010-11-14 15:20:12 +01001388 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
Sujithc4288392008-11-18 09:09:30 +05301389}
1390
Felix Fietkau82b873a2010-11-11 03:18:37 +01001391static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1392 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001393 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001394{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001395 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301396 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001397
Sujithe8324352009-01-16 21:38:42 +05301398 bf = list_first_entry(bf_head, struct ath_buf, list);
1399 bf->bf_state.bf_type &= ~BUF_AMPDU;
1400
1401 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001402 if (tid)
1403 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301404
Sujithd43f30152009-01-16 21:38:53 +05301405 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001406 fi = get_frame_info(bf->bf_mpdu);
1407 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301408 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301409 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001410}
1411
Sujith528f0c62008-10-29 10:14:26 +05301412static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001413{
Sujith528f0c62008-10-29 10:14:26 +05301414 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001415 enum ath9k_pkt_type htype;
1416 __le16 fc;
1417
Sujith528f0c62008-10-29 10:14:26 +05301418 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001419 fc = hdr->frame_control;
1420
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001421 if (ieee80211_is_beacon(fc))
1422 htype = ATH9K_PKT_TYPE_BEACON;
1423 else if (ieee80211_is_probe_resp(fc))
1424 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1425 else if (ieee80211_is_atim(fc))
1426 htype = ATH9K_PKT_TYPE_ATIM;
1427 else if (ieee80211_is_pspoll(fc))
1428 htype = ATH9K_PKT_TYPE_PSPOLL;
1429 else
1430 htype = ATH9K_PKT_TYPE_NORMAL;
1431
1432 return htype;
1433}
1434
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001435static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1436 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301437{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001438 struct ath_wiphy *aphy = hw->priv;
1439 struct ath_softc *sc = aphy->sc;
Sujith528f0c62008-10-29 10:14:26 +05301440 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001441 struct ieee80211_sta *sta = tx_info->control.sta;
1442 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301443 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001444 struct ath_frame_info *fi = get_frame_info(skb);
Sujith528f0c62008-10-29 10:14:26 +05301445 struct ath_node *an;
1446 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001447 enum ath9k_key_type keytype;
1448 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001449 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301450
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001451 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301452
Sujith528f0c62008-10-29 10:14:26 +05301453 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001454 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1455 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001456
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001457 an = (struct ath_node *) sta->drv_priv;
1458 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1459
1460 /*
1461 * Override seqno set by upper layer with the one
1462 * in tx aggregation state.
1463 */
1464 tid = ATH_AN_2_TID(an, tidno);
1465 seqno = tid->seq_next;
1466 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1467 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1468 }
1469
1470 memset(fi, 0, sizeof(*fi));
1471 if (hw_key)
1472 fi->keyix = hw_key->hw_key_idx;
1473 else
1474 fi->keyix = ATH9K_TXKEYIX_INVALID;
1475 fi->keytype = keytype;
1476 fi->framelen = framelen;
1477 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301478}
1479
Felix Fietkau82b873a2010-11-11 03:18:37 +01001480static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301481{
1482 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1483 int flags = 0;
1484
1485 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1486 flags |= ATH9K_TXDESC_INTREQ;
1487
1488 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1489 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301490
Felix Fietkau82b873a2010-11-11 03:18:37 +01001491 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001492 flags |= ATH9K_TXDESC_LDPC;
1493
Sujith528f0c62008-10-29 10:14:26 +05301494 return flags;
1495}
1496
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001497/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001498 * rix - rate index
1499 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1500 * width - 0 for 20 MHz, 1 for 40 MHz
 1501 * half_gi - use the 3.6 us (short GI) symbol time instead of 4 us
1502 */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001503static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301504 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001505{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001506 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001507 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301508
1509 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001510 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001511 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001512 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001513 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1514
1515 if (!half_gi)
1516 duration = SYMBOL_TIME(nsymbols);
1517 else
1518 duration = SYMBOL_TIME_HALFGI(nsymbols);
1519
Sujithe63835b2008-11-18 09:07:53 +05301520 /* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001521 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301522
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001523 return duration;
1524}
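
/*
 * Worked example for ath_pkt_duration() (a sketch, assuming the standard
 * 802.11n figure of 260 data bits per symbol for MCS 7, 1 stream, 20 MHz):
 * pktlen = 1500 gives nbits = 1500 * 8 + 22 = 12022 and
 * nsymbols = ceil(12022 / 260) = 47.  With the full 4 us guard interval
 * the data portion lasts 47 * 4 = 188 us; with the short GI it is
 * (47 * 18 + 4) / 5 = 170 us.  The legacy/HT training and signal fields
 * are then added as a fixed overhead.
 */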
1525
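/*
 * Chainmask reduction, as implemented below: when APM is enabled on a
 * 5 GHz channel and all three chains (0x7) are in use, frames sent at
 * legacy rates or MCS 0-15 (hw rate codes below 0x90) are limited to
 * two chains (0x3); otherwise the configured chainmask is kept.
 */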
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301526u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1527{
1528 struct ath_hw *ah = sc->sc_ah;
1529 struct ath9k_channel *curchan = ah->curchan;
1530 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1531 (curchan->channelFlags & CHANNEL_5GHZ) &&
1532 (chainmask == 0x7) && (rate < 0x90))
1533 return 0x3;
1534 else
1535 return chainmask;
1536}
1537
Felix Fietkau269c44b2010-11-14 15:20:06 +01001538static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001539{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001540 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001541 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301542 struct sk_buff *skb;
1543 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301544 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001545 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301546 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301547 int i, flags = 0;
1548 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301549 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301550
1551 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301552
Sujitha22be222009-03-30 15:28:36 +05301553 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301554 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301555 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301556 hdr = (struct ieee80211_hdr *)skb->data;
1557 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301558
Sujithc89424d2009-01-30 14:29:28 +05301559 /*
1560 * We check if Short Preamble is needed for the CTS rate by
1561 * checking the BSS's global flag.
1562 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1563 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001564 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1565 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301566 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001567 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001568
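	/*
	 * Fill the four-entry multi-rate-retry series: the hardware tries
	 * entry i up to series[i].Tries times before falling back to entry
	 * i + 1, so the order supplied by rate control is preserved here.
	 */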
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001569 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001570 bool is_40, is_sgi, is_sp;
1571 int phy;
1572
Sujithe63835b2008-11-18 09:07:53 +05301573 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001574 continue;
1575
Sujitha8efee42008-11-18 09:07:30 +05301576 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301577 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001578
Felix Fietkau27032052010-01-17 21:08:50 +01001579 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1580 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301581 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001582 flags |= ATH9K_TXDESC_RTSENA;
1583 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1584 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1585 flags |= ATH9K_TXDESC_CTSENA;
1586 }
1587
Sujithc89424d2009-01-30 14:29:28 +05301588 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1589 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1590 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1591 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001592
Felix Fietkau545750d2009-11-23 22:21:01 +01001593 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1594 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1595 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1596
1597 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1598 /* MCS rates */
1599 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301600 series[i].ChSel = ath_txchainmask_reduction(sc,
1601 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001602 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001603 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001604 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1605 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001606 continue;
1607 }
1608
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301609 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001610 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1611 !(rate->flags & IEEE80211_RATE_ERP_G))
1612 phy = WLAN_RC_PHY_CCK;
1613 else
1614 phy = WLAN_RC_PHY_OFDM;
1615
1616 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1617 series[i].Rate = rate->hw_value;
1618 if (rate->hw_value_short) {
1619 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1620 series[i].Rate |= rate->hw_value_short;
1621 } else {
1622 is_sp = false;
1623 }
1624
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301625 if (bf->bf_state.bfs_paprd)
1626 series[i].ChSel = common->tx_chainmask;
1627 else
1628 series[i].ChSel = ath_txchainmask_reduction(sc,
1629 common->tx_chainmask, series[i].Rate);
1630
Felix Fietkau545750d2009-11-23 22:21:01 +01001631 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001632 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001633 }
1634
Felix Fietkau27032052010-01-17 21:08:50 +01001635 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001636 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001637 flags &= ~ATH9K_TXDESC_RTSENA;
1638
1639 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1640 if (flags & ATH9K_TXDESC_RTSENA)
1641 flags &= ~ATH9K_TXDESC_CTSENA;
1642
Sujithe63835b2008-11-18 09:07:53 +05301643 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301644 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1645 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301646 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301647 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301648
Sujith17d79042009-02-09 13:27:03 +05301649 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301650 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001651}
1652
Felix Fietkau82b873a2010-11-11 03:18:37 +01001653static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001654 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001655 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301656{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001657 struct ath_wiphy *aphy = hw->priv;
1658 struct ath_softc *sc = aphy->sc;
Felix Fietkau04caf862010-11-14 15:20:12 +01001659 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001660 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001661 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001662 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001663 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001664 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001665
1666 bf = ath_tx_get_buffer(sc);
1667 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001668 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001669 return NULL;
1670 }
Sujithe8324352009-01-16 21:38:42 +05301671
Sujithe8324352009-01-16 21:38:42 +05301672 ATH_TXBUF_RESET(bf);
1673
Felix Fietkau827e69b2009-11-15 23:09:25 +01001674 bf->aphy = aphy;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001675 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301676 bf->bf_mpdu = skb;
1677
Ben Greearc1739eb32010-10-14 12:45:29 -07001678 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1679 skb->len, DMA_TO_DEVICE);
1680 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301681 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001682 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001683 ath_err(ath9k_hw_common(sc->sc_ah),
1684 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001685 ath_tx_return_buffer(sc, bf);
1686 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301687 }
1688
Sujithe8324352009-01-16 21:38:42 +05301689 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301690
1691 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001692 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301693
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001694 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1695 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301696
1697 ath9k_hw_filltxdesc(ah, ds,
1698 skb->len, /* segment length */
1699 true, /* first segment */
1700 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001701 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001702 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001703 txq->axq_qnum);
1704
1705
1706 return bf;
1707}
1708
1709/* FIXME: tx power */
1710static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1711 struct ath_tx_control *txctl)
1712{
1713 struct sk_buff *skb = bf->bf_mpdu;
1714 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1715 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001716 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001717 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001718 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301719
Sujithe8324352009-01-16 21:38:42 +05301720 spin_lock_bh(&txctl->txq->axq_lock);
1721
Felix Fietkau248a38d2010-12-10 21:16:46 +01001722 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001723 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1724 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001725 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001726
Felix Fietkau066dae92010-11-07 14:59:39 +01001727 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001728 }
1729
1730 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001731 /*
1732 * Try aggregation if it's a unicast data frame
1733 * and the destination is HT capable.
1734 */
1735 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301736 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001737 INIT_LIST_HEAD(&bf_head);
1738 list_add_tail(&bf->list, &bf_head);
1739
Felix Fietkau61117f02010-11-11 03:18:36 +01001740 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001741 bf->bf_state.bfs_paprd = txctl->paprd;
1742
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001743 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001744 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1745 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001746
Felix Fietkau248a38d2010-12-10 21:16:46 +01001747 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301748 }
1749
1750 spin_unlock_bh(&txctl->txq->axq_lock);
1751}
1752
1753/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001754int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301755 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001756{
Felix Fietkau28d16702010-11-14 15:20:10 +01001757 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1758 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001759 struct ieee80211_sta *sta = info->control.sta;
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001760 struct ath_wiphy *aphy = hw->priv;
1761 struct ath_softc *sc = aphy->sc;
Felix Fietkau84642d62010-06-01 21:33:13 +02001762 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001763 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001764 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001765 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001766 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001767
Ben Greeara9927ba2010-12-06 21:13:49 -08001768 /* NOTE: sta can be NULL according to net/mac80211.h */
1769 if (sta)
1770 txctl->an = (struct ath_node *)sta->drv_priv;
1771
Felix Fietkau04caf862010-11-14 15:20:12 +01001772 if (info->control.hw_key)
1773 frmlen += info->control.hw_key->icv_len;
1774
Felix Fietkau28d16702010-11-14 15:20:10 +01001775 /*
1776 * As a temporary workaround, assign seq# here; this will likely need
1777 * to be cleaned up to work better with Beacon transmission and virtual
1778 * BSSes.
1779 */
1780 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1781 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1782 sc->tx.seq_no += 0x10;
1783 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1784 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1785 }
1786
1787 /* Add the padding after the header if this is not already done */
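	/*
	 * Example (assuming a normal three-address QoS data frame, whose
	 * 802.11 header is 26 bytes): padpos = 26 and padsize = 26 & 3 = 2.
	 * skb_push() makes room for two bytes and the memmove() shifts the
	 * header forward, leaving the padding between header and payload so
	 * the frame body starts on a 4-byte boundary for the hardware.
	 */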
1788 padpos = ath9k_cmn_padpos(hdr->frame_control);
1789 padsize = padpos & 3;
1790 if (padsize && skb->len > padpos) {
1791 if (skb_headroom(skb) < padsize)
1792 return -ENOMEM;
1793
1794 skb_push(skb, padsize);
1795 memmove(skb->data, skb->data + padsize, padpos);
1796 }
1797
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001798 setup_frame_info(hw, skb, frmlen);
1799
1800 /*
1801 * At this point, the vif, hw_key and sta pointers in the tx control
 1802 * info are no longer valid (overwritten by the ath_frame_info data).
1803 */
1804
1805 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001806 if (unlikely(!bf))
1807 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001808
Felix Fietkau066dae92010-11-07 14:59:39 +01001809 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001810 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001811 if (txq == sc->tx.txq_map[q] &&
1812 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1813 ath_mac80211_stop_queue(sc, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001814 txq->stopped = 1;
1815 }
1816 spin_unlock_bh(&txq->axq_lock);
1817
Sujithe8324352009-01-16 21:38:42 +05301818 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001819
1820 return 0;
1821}
1822
Sujithe8324352009-01-16 21:38:42 +05301823/*****************/
1824/* TX Completion */
1825/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001826
Sujithe8324352009-01-16 21:38:42 +05301827static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau61117f02010-11-11 03:18:36 +01001828 struct ath_wiphy *aphy, int tx_flags, int ftype,
Felix Fietkau066dae92010-11-07 14:59:39 +01001829 struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001830{
Sujithe8324352009-01-16 21:38:42 +05301831 struct ieee80211_hw *hw = sc->hw;
1832 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001833 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001834 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001835 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301836
Joe Perches226afe62010-12-02 19:12:37 -08001837 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301838
Felix Fietkau827e69b2009-11-15 23:09:25 +01001839 if (aphy)
1840 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301841
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301842 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301843 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301844
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301845 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301846 /* Frame was ACKed */
1847 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1848 }
1849
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001850 padpos = ath9k_cmn_padpos(hdr->frame_control);
1851 padsize = padpos & 3;
 1852 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301853 /*
1854 * Remove MAC header padding before giving the frame back to
1855 * mac80211.
1856 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001857 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301858 skb_pull(skb, padsize);
1859 }
1860
Sujith1b04b932010-01-08 10:36:05 +05301861 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1862 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001863 ath_dbg(common, ATH_DBG_PS,
1864 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301865 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1866 PS_WAIT_FOR_CAB |
1867 PS_WAIT_FOR_PSPOLL_DATA |
1868 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001869 }
1870
Felix Fietkau61117f02010-11-11 03:18:36 +01001871 if (unlikely(ftype))
1872 ath9k_tx_status(hw, skb, ftype);
Felix Fietkau97923b12010-06-12 00:33:55 -04001873 else {
1874 q = skb_get_queue_mapping(skb);
Felix Fietkau066dae92010-11-07 14:59:39 +01001875 if (txq == sc->tx.txq_map[q]) {
1876 spin_lock_bh(&txq->axq_lock);
1877 if (WARN_ON(--txq->pending_frames < 0))
1878 txq->pending_frames = 0;
1879 spin_unlock_bh(&txq->axq_lock);
1880 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001881
Felix Fietkau827e69b2009-11-15 23:09:25 +01001882 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001883 }
Sujithe8324352009-01-16 21:38:42 +05301884}
1885
1886static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001887 struct ath_txq *txq, struct list_head *bf_q,
1888 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301889{
1890 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301891 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301892 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301893
Sujithe8324352009-01-16 21:38:42 +05301894 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301895 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301896
1897 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301898 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301899
1900 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301901 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301902 }
1903
Ben Greearc1739eb32010-10-14 12:45:29 -07001904 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001905 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001906
1907 if (bf->bf_state.bfs_paprd) {
Felix Fietkau82259b72010-11-14 15:20:04 +01001908 if (!sc->paprd_pending)
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001909 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001910 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001911 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001912 } else {
Felix Fietkau066dae92010-11-07 14:59:39 +01001913 ath_debug_stat_tx(sc, bf, ts);
Felix Fietkau61117f02010-11-11 03:18:36 +01001914 ath_tx_complete(sc, skb, bf->aphy, tx_flags,
1915 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001916 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001917 /* At this point, skb (bf->bf_mpdu) is consumed... make sure we don't
1918 * accidentally reference it later.
1919 */
1920 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301921
1922 /*
 1923 * Return the list of ath_buf for this mpdu to the free queue
1924 */
1925 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1926 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1927 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1928}
1929
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001930static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Felix Fietkaub572d032010-11-14 15:20:07 +01001931 int nframes, int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301932{
Sujitha22be222009-03-30 15:28:36 +05301933 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301934 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301935 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001936 struct ieee80211_hw *hw = bf->aphy->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001937 struct ath_softc *sc = bf->aphy->sc;
1938 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301939 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301940
Sujith95e4acb2009-03-13 08:56:09 +05301941 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001942 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301943
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001944 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301945 WARN_ON(tx_rateindex >= hw->max_rates);
1946
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001947 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301948 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001949 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001950 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301951
Felix Fietkaub572d032010-11-14 15:20:07 +01001952 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02001953
Felix Fietkaub572d032010-11-14 15:20:07 +01001954 tx_info->status.ampdu_len = nframes;
1955 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02001956 }
1957
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001958 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301959 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001960 /*
 1961 * If an underrun error is seen, treat it as an excessive
1962 * retry only if max frame trigger level has been reached
1963 * (2 KB for single stream, and 4 KB for dual stream).
1964 * Adjust the long retry as if the frame was tried
1965 * hw->max_rate_tries times to affect how rate control updates
1966 * PER for the failed rate.
 1967 * In case of congestion on the bus, penalizing this type of
 1968 * underrun should help the hardware actually transmit new frames
1969 * successfully by eventually preferring slower rates.
1970 * This itself should also alleviate congestion on the bus.
1971 */
1972 if (ieee80211_is_data(hdr->frame_control) &&
1973 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1974 ATH9K_TX_DELIM_UNDERRUN)) &&
1975 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1976 tx_info->status.rates[tx_rateindex].count =
1977 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05301978 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301979
Felix Fietkau545750d2009-11-23 22:21:01 +01001980 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301981 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01001982 tx_info->status.rates[i].idx = -1;
1983 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301984
Felix Fietkau78c46532010-06-25 01:26:16 +02001985 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05301986}
1987
Ben Greear60f2d1d2011-01-09 23:11:52 -08001988/* Takes no locks itself; the caller must hold spin_lock_bh(&txq->axq_lock)
 1989 * before calling this.
1990 */
1991static void __ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
Sujith059d8062009-01-16 21:38:49 +05301992{
Ben Greear60f2d1d2011-01-09 23:11:52 -08001993 if (txq->mac80211_qnum >= 0 &&
1994 txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1995 if (ath_mac80211_start_queue(sc, txq->mac80211_qnum))
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07001996 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05301997 }
Sujith059d8062009-01-16 21:38:49 +05301998}
1999
Sujithc4288392008-11-18 09:09:30 +05302000static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002001{
Sujithcbe61d82009-02-09 13:27:12 +05302002 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002003 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002004 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2005 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302006 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002007 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302008 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002009 int status;
Felix Fietkau066dae92010-11-07 14:59:39 +01002010 int qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002011
Joe Perches226afe62010-12-02 19:12:37 -08002012 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2013 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2014 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002015
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002016 for (;;) {
2017 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002018 if (list_empty(&txq->axq_q)) {
2019 txq->axq_link = NULL;
Ben Greear082f6532011-01-09 23:11:47 -08002020 if (sc->sc_flags & SC_OP_TXAGGR)
2021 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002022 spin_unlock_bh(&txq->axq_lock);
2023 break;
2024 }
2025 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2026
2027 /*
 2028 * There is a race condition where a BH gets scheduled
 2029 * after sw writes TxE and before hw re-loads the last
2030 * descriptor to get the newly chained one.
2031 * Software must keep the last DONE descriptor as a
2032 * holding descriptor - software does so by marking
2033 * it with the STALE flag.
2034 */
2035 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302036 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002037 bf_held = bf;
2038 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302039 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002040 break;
2041 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002042 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302043 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002044 }
2045 }
2046
2047 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302048 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002049
Felix Fietkau29bffa92010-03-29 20:14:23 -07002050 memset(&ts, 0, sizeof(ts));
2051 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002052 if (status == -EINPROGRESS) {
2053 spin_unlock_bh(&txq->axq_lock);
2054 break;
2055 }
Ben Greear2dac4fb2011-01-09 23:11:45 -08002056 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002057
2058 /*
 2059 * Remove the ath_bufs of the same transmit unit from txq,
 2060 * but leave the last descriptor behind as the holding
2061 * descriptor for hw.
2062 */
Sujitha119cc42009-03-30 15:28:38 +05302063 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002064 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002065 if (!list_is_singular(&lastbf->list))
2066 list_cut_position(&bf_head,
2067 &txq->axq_q, lastbf->list.prev);
2068
2069 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002070 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002071 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002072 if (bf_held)
2073 list_del(&bf_held->list);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002074
2075 if (bf_is_ampdu_not_probing(bf))
2076 txq->axq_ampdu_depth--;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002077 spin_unlock_bh(&txq->axq_lock);
2078
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002079 if (bf_held)
2080 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002081
Sujithcd3d39a2008-08-11 14:03:34 +05302082 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002083 /*
2084 * This frame is sent out as a single frame.
2085 * Use hardware retry status for this frame.
2086 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002087 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302088 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaub572d032010-11-14 15:20:07 +01002089 ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002090 }
Johannes Berge6a98542008-10-21 12:40:02 +02002091
Felix Fietkau066dae92010-11-07 14:59:39 +01002092 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2093
Sujithcd3d39a2008-08-11 14:03:34 +05302094 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002095 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2096 true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002097 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002098 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002099
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002100 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002101 __ath_wake_mac80211_queue(sc, txq);
2102
Sujith672840a2008-08-11 14:05:08 +05302103 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002104 ath_txq_schedule(sc, txq);
2105 spin_unlock_bh(&txq->axq_lock);
2106 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002107}
2108
Sujith305fe472009-07-23 15:32:29 +05302109static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002110{
2111 struct ath_softc *sc = container_of(work, struct ath_softc,
2112 tx_complete_work.work);
2113 struct ath_txq *txq;
2114 int i;
2115 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002116#ifdef CONFIG_ATH9K_DEBUGFS
2117 sc->tx_complete_poll_work_seen++;
2118#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002119
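	/*
	 * Watchdog logic: a queue that still has axq_depth set and was
	 * already flagged axq_tx_inprogress on the previous pass (one
	 * ATH_TX_COMPLETE_POLL_INT earlier) is treated as hung and the
	 * chip is reset below.
	 */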
2120 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2121 if (ATH_TXQ_SETUP(sc, i)) {
2122 txq = &sc->tx.txq[i];
2123 spin_lock_bh(&txq->axq_lock);
2124 if (txq->axq_depth) {
2125 if (txq->axq_tx_inprogress) {
2126 needreset = true;
2127 spin_unlock_bh(&txq->axq_lock);
2128 break;
2129 } else {
2130 txq->axq_tx_inprogress = true;
2131 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08002132 } else {
2133 /* If the queue has pending buffers, then it
2134 * should be doing tx work (and have axq_depth).
 2135 * We shouldn't get into this state, but in
 2136 * practice we do.
2137 */
2138 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
2139 (txq->pending_frames > 0 ||
2140 !list_empty(&txq->axq_acq) ||
2141 txq->stopped)) {
2142 ath_err(ath9k_hw_common(sc->sc_ah),
2143 "txq: %p axq_qnum: %u,"
2144 " mac80211_qnum: %i"
2145 " axq_link: %p"
2146 " pending frames: %i"
2147 " axq_acq empty: %i"
2148 " stopped: %i"
2149 " axq_depth: 0 Attempting to"
2150 " restart tx logic.\n",
2151 txq, txq->axq_qnum,
2152 txq->mac80211_qnum,
2153 txq->axq_link,
2154 txq->pending_frames,
2155 list_empty(&txq->axq_acq),
2156 txq->stopped);
2157 __ath_wake_mac80211_queue(sc, txq);
2158 ath_txq_schedule(sc, txq);
2159 }
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002160 }
2161 spin_unlock_bh(&txq->axq_lock);
2162 }
2163
2164 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002165 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2166 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302167 ath9k_ps_wakeup(sc);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002168 ath_reset(sc, true);
Sujith332c5562009-10-09 09:51:28 +05302169 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002170 }
2171
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002172 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002173 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2174}
2175
2176
Sujithe8324352009-01-16 21:38:42 +05302177
2178void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002179{
Sujithe8324352009-01-16 21:38:42 +05302180 int i;
2181 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002182
Sujithe8324352009-01-16 21:38:42 +05302183 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002184
2185 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302186 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2187 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002188 }
2189}
2190
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002191void ath_tx_edma_tasklet(struct ath_softc *sc)
2192{
2193 struct ath_tx_status txs;
2194 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2195 struct ath_hw *ah = sc->sc_ah;
2196 struct ath_txq *txq;
2197 struct ath_buf *bf, *lastbf;
2198 struct list_head bf_head;
2199 int status;
2200 int txok;
Felix Fietkau066dae92010-11-07 14:59:39 +01002201 int qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002202
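	/*
	 * EDMA chips report completions through a global TX status ring
	 * rather than per-queue descriptors; each status entry carries the
	 * qid it belongs to, so the matching txq is looked up per entry.
	 */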
2203 for (;;) {
2204 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2205 if (status == -EINPROGRESS)
2206 break;
2207 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002208 ath_dbg(common, ATH_DBG_XMIT,
2209 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002210 break;
2211 }
2212
2213 /* Skip beacon completions */
2214 if (txs.qid == sc->beacon.beaconq)
2215 continue;
2216
2217 txq = &sc->tx.txq[txs.qid];
2218
2219 spin_lock_bh(&txq->axq_lock);
2220 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2221 spin_unlock_bh(&txq->axq_lock);
2222 return;
2223 }
2224
2225 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2226 struct ath_buf, list);
2227 lastbf = bf->bf_lastbf;
2228
2229 INIT_LIST_HEAD(&bf_head);
2230 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2231 &lastbf->list);
2232 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2233 txq->axq_depth--;
2234 txq->axq_tx_inprogress = false;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002235 if (bf_is_ampdu_not_probing(bf))
2236 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002237 spin_unlock_bh(&txq->axq_lock);
2238
2239 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2240
2241 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002242 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2243 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkaub572d032010-11-14 15:20:07 +01002244 ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002245 }
2246
Felix Fietkau066dae92010-11-07 14:59:39 +01002247 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2248
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002249 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002250 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2251 txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002252 else
2253 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2254 &txs, txok, 0);
2255
2256 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002257 __ath_wake_mac80211_queue(sc, txq);
2258
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002259 if (!list_empty(&txq->txq_fifo_pending)) {
2260 INIT_LIST_HEAD(&bf_head);
2261 bf = list_first_entry(&txq->txq_fifo_pending,
2262 struct ath_buf, list);
2263 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2264 &bf->bf_lastbf->list);
2265 ath_tx_txqaddbuf(sc, txq, &bf_head);
2266 } else if (sc->sc_flags & SC_OP_TXAGGR)
2267 ath_txq_schedule(sc, txq);
2268 spin_unlock_bh(&txq->axq_lock);
2269 }
2270}
2271
Sujithe8324352009-01-16 21:38:42 +05302272/*****************/
2273/* Init, Cleanup */
2274/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002275
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002276static int ath_txstatus_setup(struct ath_softc *sc, int size)
2277{
2278 struct ath_descdma *dd = &sc->txsdma;
2279 u8 txs_len = sc->sc_ah->caps.txs_len;
2280
2281 dd->dd_desc_len = size * txs_len;
2282 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2283 &dd->dd_desc_paddr, GFP_KERNEL);
2284 if (!dd->dd_desc)
2285 return -ENOMEM;
2286
2287 return 0;
2288}
2289
2290static int ath_tx_edma_init(struct ath_softc *sc)
2291{
2292 int err;
2293
2294 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2295 if (!err)
2296 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2297 sc->txsdma.dd_desc_paddr,
2298 ATH_TXSTATUS_RING_SIZE);
2299
2300 return err;
2301}
2302
2303static void ath_tx_edma_cleanup(struct ath_softc *sc)
2304{
2305 struct ath_descdma *dd = &sc->txsdma;
2306
2307 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2308 dd->dd_desc_paddr);
2309}
2310
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002311int ath_tx_init(struct ath_softc *sc, int nbufs)
2312{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002313 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002314 int error = 0;
2315
Sujith797fe5cb2009-03-30 15:28:45 +05302316 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002317
Sujith797fe5cb2009-03-30 15:28:45 +05302318 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002319 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302320 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002321 ath_err(common,
2322 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302323 goto err;
2324 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002325
Sujith797fe5cb2009-03-30 15:28:45 +05302326 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002327 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302328 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002329 ath_err(common,
2330 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302331 goto err;
2332 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002333
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002334 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2335
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002336 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2337 error = ath_tx_edma_init(sc);
2338 if (error)
2339 goto err;
2340 }
2341
Sujith797fe5cb2009-03-30 15:28:45 +05302342err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002343 if (error != 0)
2344 ath_tx_cleanup(sc);
2345
2346 return error;
2347}
2348
Sujith797fe5cb2009-03-30 15:28:45 +05302349void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002350{
Sujithb77f4832008-12-07 21:44:03 +05302351 if (sc->beacon.bdma.dd_desc_len != 0)
2352 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002353
Sujithb77f4832008-12-07 21:44:03 +05302354 if (sc->tx.txdma.dd_desc_len != 0)
2355 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002356
2357 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2358 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002359}
2360
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002361void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2362{
Sujithc5170162008-10-29 10:13:59 +05302363 struct ath_atx_tid *tid;
2364 struct ath_atx_ac *ac;
2365 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002366
Sujith8ee5afb2008-12-07 21:43:36 +05302367 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302368 tidno < WME_NUM_TID;
2369 tidno++, tid++) {
2370 tid->an = an;
2371 tid->tidno = tidno;
2372 tid->seq_start = tid->seq_next = 0;
2373 tid->baw_size = WME_MAX_BA;
2374 tid->baw_head = tid->baw_tail = 0;
2375 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302376 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302377 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302378 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302379 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302380 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302381 tid->state &= ~AGGR_ADDBA_COMPLETE;
2382 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302383 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002384
Sujith8ee5afb2008-12-07 21:43:36 +05302385 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302386 acno < WME_NUM_AC; acno++, ac++) {
2387 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002388 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302389 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002390 }
2391}
2392
Sujithb5aa9bf2008-10-29 10:13:31 +05302393void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002394{
Felix Fietkau2b409942010-07-07 19:42:08 +02002395 struct ath_atx_ac *ac;
2396 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002397 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002398 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302399
Felix Fietkau2b409942010-07-07 19:42:08 +02002400 for (tidno = 0, tid = &an->tid[tidno];
2401 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002402
Felix Fietkau2b409942010-07-07 19:42:08 +02002403 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002404 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002405
Felix Fietkau2b409942010-07-07 19:42:08 +02002406 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002407
Felix Fietkau2b409942010-07-07 19:42:08 +02002408 if (tid->sched) {
2409 list_del(&tid->list);
2410 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002411 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002412
2413 if (ac->sched) {
2414 list_del(&ac->list);
2415 tid->ac->sched = false;
2416 }
2417
2418 ath_tid_drain(sc, txq, tid);
2419 tid->state &= ~AGGR_ADDBA_COMPLETE;
2420 tid->state &= ~AGGR_CLEANUP;
2421
2422 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002423 }
2424}