/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define L_STF 8
#define L_LTF 8
#define L_SIG 4
#define HT_SIG 8
#define HT_STF 4
#define HT_LTF(_ns) (4 * (_ns))
#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME 16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{  26,  54 },	/* 0: BPSK */
	{  52, 108 },	/* 1: QPSK 1/2 */
	{  78, 162 },	/* 2: QPSK 3/4 */
	{ 104, 216 },	/* 3: 16-QAM 1/2 */
	{ 156, 324 },	/* 4: 16-QAM 3/4 */
	{ 208, 432 },	/* 5: 64-QAM 2/3 */
	{ 234, 486 },	/* 6: 64-QAM 3/4 */
	{ 260, 540 },	/* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate) ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

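/*
 * Maximum A-MPDU length (in bytes) that fits into a 4 ms transmit
 * duration, indexed by channel mode (HT20/HT40, with and without short
 * GI) and MCS index. Entries are capped at 65532 so that the resulting
 * aggregate length always fits the 16-bit length fields used by the
 * hardware.
 */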
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf)) {
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
		}
	}

	spin_unlock_bh(&txq->axq_lock);
}

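/*
 * Mark the given sequence number as completed in the software block-ack
 * window, then slide the window start (seq_start/baw_head) forward past
 * any leading entries that have already been completed.
 */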
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

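/*
 * Track a (non-retried) subframe in the software block-ack window by
 * setting its bit in tid->tx_buf, extending the window tail if the frame
 * lies beyond it.
 */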
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

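/*
 * Allocate a fresh ath_buf that mirrors an existing one (same skb, buffer
 * address, descriptor contents and state). Used when the last descriptor
 * of an aggregate must be kept as a holding descriptor while the frame is
 * software-retried.
 */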
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

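/*
 * Handle tx completion of an aggregate: walk the subframe list, complete
 * the subframes acked by the block-ack (updating the BAW and rate control
 * statistics), software-retry the un-acked subframes that are still
 * eligible, and requeue those at the head of the tid queue so that
 * ordering is preserved.
 */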
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	int nframes;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));
	nframes = bf->bf_nframes;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				bf->bf_nframes = nframes;
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

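/*
 * Compute the aggregate size limit (in bytes) for this frame's rate
 * series: the smallest 4 ms frame length across the configured rates,
 * further clamped by the peer's maximum A-MPDU size. Returns 0 when
 * aggregation should be avoided (legacy rates or a rate-control probe).
 */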
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

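/*
 * Pull frames off the tid queue and chain them into a single aggregate,
 * stopping when the block-ack window would be overstepped, the byte or
 * subframe limit is reached, or the queue runs empty. The assembled
 * subframes are moved onto bf_q and linked through their descriptors.
 */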
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

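/*
 * Form aggregates from the tid queue and hand them to the hardware queue,
 * continuing while the hardware queue depth stays below the minimum and
 * the block-ack window remains open. A single remaining frame is sent as
 * a regular (non-aggregate) frame.
 */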
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

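/*
 * Begin an ADDBA exchange for a tid: mark it as in progress and pause it
 * until ath_tx_aggr_resume() is called; *ssn reports the starting
 * sequence number for the session.
 */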
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

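/*
 * Round-robin scheduler: pick the next access category queued on this
 * hardware queue, service one of its tids, and requeue the AC/tid if
 * frames are still pending for them.
 */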
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

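/*
 * Queue an MPDU belonging to an aggregation-enabled tid: either push it
 * straight to the hardware (tracking it in the BAW) or park it on the
 * software tid queue for later aggregation, depending on the conditions
 * listed in the comment below.
 */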
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

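/*
 * For QoS data frames to a known station, record the tid and assign the
 * next sequence number from the tid's counter, overriding the sequence
 * number set by the upper layer so it stays consistent with the tx
 * aggregation state.
 */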
Sujith528f0c62008-10-29 10:14:26 +05301391static void assign_aggr_tid_seqno(struct sk_buff *skb,
1392 struct ath_buf *bf)
1393{
1394 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1395 struct ieee80211_hdr *hdr;
1396 struct ath_node *an;
1397 struct ath_atx_tid *tid;
1398 __le16 fc;
1399 u8 *qc;
1400
1401 if (!tx_info->control.sta)
1402 return;
1403
1404 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1405 hdr = (struct ieee80211_hdr *)skb->data;
1406 fc = hdr->frame_control;
1407
Sujith528f0c62008-10-29 10:14:26 +05301408 if (ieee80211_is_data_qos(fc)) {
1409 qc = ieee80211_get_qos_ctl(hdr);
1410 bf->bf_tidno = qc[0] & 0xf;
Sujith98deeea2008-08-11 14:05:46 +05301411 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001412
Sujithe8324352009-01-16 21:38:42 +05301413 /*
1414 * For HT capable stations, we save tidno for later use.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301415	 * We also override the seqno set by the upper layer with the
	1416	 * one kept in the TX aggregation state.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301417 */
1418 tid = ATH_AN_2_TID(an, bf->bf_tidno);
Sujith17b182e2009-12-14 14:56:56 +05301419 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301420 bf->bf_seqno = tid->seq_next;
1421 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
Sujith528f0c62008-10-29 10:14:26 +05301422}
1423
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001424static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
Sujith528f0c62008-10-29 10:14:26 +05301425{
1426 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1427 int flags = 0;
1428
1429 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1430 flags |= ATH9K_TXDESC_INTREQ;
1431
1432 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1433 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301434
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001435 if (use_ldpc)
1436 flags |= ATH9K_TXDESC_LDPC;
1437
Sujith528f0c62008-10-29 10:14:26 +05301438 return flags;
1439}
1440
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001441/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001442 * rix - rate index
1443 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1444 * width - 0 for 20 MHz, 1 for 40 MHz
	1445	 * half_gi - set to use the 3.6 us short-GI symbol time instead of 4 us
1446 */
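/*
 * Illustrative example: a 1500-byte MPDU at MCS 7, 20 MHz, long GI
 * (260 bits per symbol, one stream) needs (1500 * 8 + 22 + 259) / 260 = 47
 * symbols, i.e. 188 us of data time, plus the preamble and training
 * fields added below.
 */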
Sujith102e0572008-10-29 10:15:16 +05301447static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1448 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001449{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001450 u32 nbits, nsymbits, duration, nsymbols;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001451 int streams, pktlen;
1452
Sujithcd3d39a2008-08-11 14:03:34 +05301453 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +05301454
1455 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001456 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001457 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001458 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001459 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1460
1461 if (!half_gi)
1462 duration = SYMBOL_TIME(nsymbols);
1463 else
1464 duration = SYMBOL_TIME_HALFGI(nsymbols);
1465
Sujithe63835b2008-11-18 09:07:53 +05301466	/* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001467 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301468
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001469 return duration;
1470}
1471
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001472static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1473{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001474 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001475 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301476 struct sk_buff *skb;
1477 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301478 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001479 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301480 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301481 int i, flags = 0;
1482 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301483 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301484
1485 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301486
Sujitha22be222009-03-30 15:28:36 +05301487 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301488 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301489 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301490 hdr = (struct ieee80211_hdr *)skb->data;
1491 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301492
Sujithc89424d2009-01-30 14:29:28 +05301493 /*
	1494	 * Whether the CTS rate needs Short Preamble is decided by the
	1495	 * BSS-wide preamble flag, whereas each rate series relies on its
	1496	 * own IEEE80211_TX_RC_USE_SHORT_PREAMBLE flag.
1497 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001498 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1499 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301500 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001501 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001502
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001503 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001504 bool is_40, is_sgi, is_sp;
1505 int phy;
1506
Sujithe63835b2008-11-18 09:07:53 +05301507 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001508 continue;
1509
Sujitha8efee42008-11-18 09:07:30 +05301510 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301511 series[i].Tries = rates[i].count;
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001512 series[i].ChSel = common->tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001513
Felix Fietkau27032052010-01-17 21:08:50 +01001514 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1515 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301516 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001517 flags |= ATH9K_TXDESC_RTSENA;
1518 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1519 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1520 flags |= ATH9K_TXDESC_CTSENA;
1521 }
1522
Sujithc89424d2009-01-30 14:29:28 +05301523 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1524 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1525 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1526 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001527
Felix Fietkau545750d2009-11-23 22:21:01 +01001528 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1529 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1530 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1531
1532 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1533 /* MCS rates */
1534 series[i].Rate = rix | 0x80;
1535 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1536 is_40, is_sgi, is_sp);
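			/*
			 * Enable STBC only for single-stream MCS rates (0-7)
			 * and only when mac80211 has requested it.
			 */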
Felix Fietkau074a8c02010-04-19 19:57:36 +02001537 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1538 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001539 continue;
1540 }
1541
	1542		/* legacy rates */
	1543		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
	1544		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
	1545		    !(rate->flags & IEEE80211_RATE_ERP_G))
	1546			phy = WLAN_RC_PHY_CCK;
	1547		else
	1548			phy = WLAN_RC_PHY_OFDM;
	1549	
1550 series[i].Rate = rate->hw_value;
1551 if (rate->hw_value_short) {
1552 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1553 series[i].Rate |= rate->hw_value_short;
1554 } else {
1555 is_sp = false;
1556 }
1557
1558 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1559 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001560 }
1561
Felix Fietkau27032052010-01-17 21:08:50 +01001562 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1563 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1564 flags &= ~ATH9K_TXDESC_RTSENA;
1565
1566 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1567 if (flags & ATH9K_TXDESC_RTSENA)
1568 flags &= ~ATH9K_TXDESC_CTSENA;
1569
Sujithe63835b2008-11-18 09:07:53 +05301570 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301571 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1572 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301573 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301574 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301575
Sujith17d79042009-02-09 13:27:03 +05301576 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301577 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001578}
1579
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001580static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
Sujithe8324352009-01-16 21:38:42 +05301581 struct sk_buff *skb,
1582 struct ath_tx_control *txctl)
1583{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001584 struct ath_wiphy *aphy = hw->priv;
1585 struct ath_softc *sc = aphy->sc;
Sujithe8324352009-01-16 21:38:42 +05301586 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1587 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301588 int hdrlen;
1589 __le16 fc;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001590 int padpos, padsize;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001591 bool use_ldpc = false;
Sujithe8324352009-01-16 21:38:42 +05301592
Felix Fietkau827e69b2009-11-15 23:09:25 +01001593 tx_info->pad[0] = 0;
1594 switch (txctl->frame_type) {
Pavel Roskinc81494d2010-03-31 18:05:25 -04001595 case ATH9K_IFT_NOT_INTERNAL:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001596 break;
Pavel Roskinc81494d2010-03-31 18:05:25 -04001597 case ATH9K_IFT_PAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001598 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1599 /* fall through */
Pavel Roskinc81494d2010-03-31 18:05:25 -04001600 case ATH9K_IFT_UNPAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001601 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1602 break;
1603 }
Sujithe8324352009-01-16 21:38:42 +05301604 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1605 fc = hdr->frame_control;
1606
1607 ATH_TXBUF_RESET(bf);
1608
Felix Fietkau827e69b2009-11-15 23:09:25 +01001609 bf->aphy = aphy;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001610 bf->bf_frmlen = skb->len + FCS_LEN;
1611 /* Remove the padding size from bf_frmlen, if any */
1612 padpos = ath9k_cmn_padpos(hdr->frame_control);
1613 padsize = padpos & 3;
	1614	if (padsize && skb->len > padpos + padsize)
	1615		bf->bf_frmlen -= padsize;
Sujithe8324352009-01-16 21:38:42 +05301617
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001618 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
Sujithc656bbb2009-01-16 21:38:56 +05301619 bf->bf_state.bf_type |= BUF_HT;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001620 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1621 use_ldpc = true;
1622 }
Sujithe8324352009-01-16 21:38:42 +05301623
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001624 bf->bf_state.bfs_paprd = txctl->paprd;
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001625 if (txctl->paprd)
1626 bf->bf_state.bfs_paprd_timestamp = jiffies;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001627 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
Sujithe8324352009-01-16 21:38:42 +05301628
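	/*
	 * When a hardware key is in use, include the ICV/MIC length that
	 * the hardware will append in the frame length used for duration
	 * and rate calculations.
	 */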
Luis R. Rodriguezc17512d2010-08-05 17:56:54 -04001629 bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujithe8324352009-01-16 21:38:42 +05301630 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1631 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1632 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1633 } else {
1634 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1635 }
1636
Sujith17b182e2009-12-14 14:56:56 +05301637 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1638 (sc->sc_flags & SC_OP_TXAGGR))
Sujithe8324352009-01-16 21:38:42 +05301639 assign_aggr_tid_seqno(skb, bf);
1640
1641 bf->bf_mpdu = skb;
1642
1643 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
1644 skb->len, DMA_TO_DEVICE);
1645 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
1646 bf->bf_mpdu = NULL;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001647 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1648 "dma_mapping_error() on TX\n");
Sujithe8324352009-01-16 21:38:42 +05301649 return -ENOMEM;
1650 }
1651
1652 bf->bf_buf_addr = bf->bf_dmacontext;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001653
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001654 bf->bf_tx_aborted = false;
1655
Sujithe8324352009-01-16 21:38:42 +05301656 return 0;
1657}
1658
1659/* FIXME: tx power */
1660static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1661 struct ath_tx_control *txctl)
1662{
Sujitha22be222009-03-30 15:28:36 +05301663 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301664 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithc37452b2009-03-09 09:31:57 +05301665 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301666 struct ath_node *an = NULL;
1667 struct list_head bf_head;
1668 struct ath_desc *ds;
1669 struct ath_atx_tid *tid;
Sujithcbe61d82009-02-09 13:27:12 +05301670 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301671 int frm_type;
Sujithc37452b2009-03-09 09:31:57 +05301672 __le16 fc;
Sujithe8324352009-01-16 21:38:42 +05301673
1674 frm_type = get_hw_packet_type(skb);
Sujithc37452b2009-03-09 09:31:57 +05301675 fc = hdr->frame_control;
Sujithe8324352009-01-16 21:38:42 +05301676
1677 INIT_LIST_HEAD(&bf_head);
1678 list_add_tail(&bf->list, &bf_head);
1679
1680 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001681 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301682
1683 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1684 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1685
1686 ath9k_hw_filltxdesc(ah, ds,
1687 skb->len, /* segment length */
1688 true, /* first segment */
1689 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001690 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001691 bf->bf_buf_addr,
1692 txctl->txq->axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301693
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001694 if (bf->bf_state.bfs_paprd)
1695 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1696
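	/*
	 * Under the TXQ lock, decide whether the frame goes through the
	 * per-TID software queue (A-MPDU path) or is handed straight to
	 * the hardware queue.
	 */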
Sujithe8324352009-01-16 21:38:42 +05301697 spin_lock_bh(&txctl->txq->axq_lock);
1698
1699 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1700 tx_info->control.sta) {
1701 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1702 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1703
Sujithc37452b2009-03-09 09:31:57 +05301704 if (!ieee80211_is_data_qos(fc)) {
1705 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1706 goto tx_done;
1707 }
1708
Felix Fietkau4fdec032010-03-12 04:02:43 +01001709 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Sujithe8324352009-01-16 21:38:42 +05301710 /*
1711 * Try aggregation if it's a unicast data frame
1712 * and the destination is HT capable.
1713 */
1714 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1715 } else {
1716 /*
1717 * Send this frame as regular when ADDBA
	1718			 * Send this frame as a regular frame when the ADDBA
	1719			 * exchange is neither complete nor pending.
Sujithc37452b2009-03-09 09:31:57 +05301720 ath_tx_send_ht_normal(sc, txctl->txq,
1721 tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301722 }
1723 } else {
Sujithc37452b2009-03-09 09:31:57 +05301724 ath_tx_send_normal(sc, txctl->txq, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301725 }
1726
Sujithc37452b2009-03-09 09:31:57 +05301727tx_done:
Sujithe8324352009-01-16 21:38:42 +05301728 spin_unlock_bh(&txctl->txq->axq_lock);
1729}
1730
1731/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001732int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301733 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001734{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001735 struct ath_wiphy *aphy = hw->priv;
1736 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001737 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau84642d62010-06-01 21:33:13 +02001738 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001739 struct ath_buf *bf;
Felix Fietkau97923b12010-06-12 00:33:55 -04001740 int q, r;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001741
Sujithe8324352009-01-16 21:38:42 +05301742 bf = ath_tx_get_buffer(sc);
1743 if (!bf) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001744 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
Sujithe8324352009-01-16 21:38:42 +05301745 return -1;
1746 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001747
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001748 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301749 if (unlikely(r)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001750 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001751
Sujithe8324352009-01-16 21:38:42 +05301752		/* Upon ath_tx_processq() this TX queue will be resumed;
	1753		 * we guarantee this will happen because we know at least
	1754		 * one buffer on the queue still has to go through TX
	1755		 * completion. */
1756 spin_lock_bh(&txq->axq_lock);
Felix Fietkau84642d62010-06-01 21:33:13 +02001757 if (!txq->stopped && txq->axq_depth > 1) {
Luis R. Rodriguezf52de032009-11-02 17:09:12 -08001758 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
Sujithe8324352009-01-16 21:38:42 +05301759 txq->stopped = 1;
1760 }
1761 spin_unlock_bh(&txq->axq_lock);
1762
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001763 ath_tx_return_buffer(sc, bf);
Sujithe8324352009-01-16 21:38:42 +05301764
1765 return r;
1766 }
1767
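	/*
	 * Count this frame against the per-AC backlog and stop the
	 * corresponding mac80211 queue once it exceeds ATH_MAX_QDEPTH;
	 * the queue is started again from the TX completion path.
	 */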
Felix Fietkau97923b12010-06-12 00:33:55 -04001768 q = skb_get_queue_mapping(skb);
1769 if (q >= 4)
1770 q = 0;
1771
1772 spin_lock_bh(&txq->axq_lock);
1773 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1774 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1775 txq->stopped = 1;
1776 }
1777 spin_unlock_bh(&txq->axq_lock);
1778
Sujithe8324352009-01-16 21:38:42 +05301779 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001780
1781 return 0;
1782}
1783
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001784void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001785{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001786 struct ath_wiphy *aphy = hw->priv;
1787 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001788 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001789 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1790 int padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301791 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1792 struct ath_tx_control txctl;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001793
Sujithe8324352009-01-16 21:38:42 +05301794 memset(&txctl, 0, sizeof(struct ath_tx_control));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001795
Sujithe8324352009-01-16 21:38:42 +05301796 /*
1797 * As a temporary workaround, assign seq# here; this will likely need
1798 * to be cleaned up to work better with Beacon transmission and virtual
1799 * BSSes.
1800 */
1801 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
Sujithe8324352009-01-16 21:38:42 +05301802 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1803 sc->tx.seq_no += 0x10;
1804 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1805 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001806 }
1807
Sujithe8324352009-01-16 21:38:42 +05301808 /* Add the padding after the header if this is not already done */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001809 padpos = ath9k_cmn_padpos(hdr->frame_control);
1810 padsize = padpos & 3;
	1811	if (padsize && skb->len > padpos) {
Sujithe8324352009-01-16 21:38:42 +05301812 if (skb_headroom(skb) < padsize) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001813 ath_print(common, ATH_DBG_XMIT,
1814 "TX CABQ padding failed\n");
Sujithe8324352009-01-16 21:38:42 +05301815 dev_kfree_skb_any(skb);
1816 return;
1817 }
1818 skb_push(skb, padsize);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001819 memmove(skb->data, skb->data + padsize, padpos);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001820 }
1821
Sujithe8324352009-01-16 21:38:42 +05301822 txctl.txq = sc->beacon.cabq;
1823
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001824 ath_print(common, ATH_DBG_XMIT,
1825 "transmitting CABQ packet, skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301826
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001827 if (ath_tx_start(hw, skb, &txctl) != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001828 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
Sujithe8324352009-01-16 21:38:42 +05301829 goto exit;
1830 }
1831
1832 return;
1833exit:
1834 dev_kfree_skb_any(skb);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001835}
1836
Sujithe8324352009-01-16 21:38:42 +05301837/*****************/
1838/* TX Completion */
1839/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001840
Sujithe8324352009-01-16 21:38:42 +05301841static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau827e69b2009-11-15 23:09:25 +01001842 struct ath_wiphy *aphy, int tx_flags)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001843{
Sujithe8324352009-01-16 21:38:42 +05301844 struct ieee80211_hw *hw = sc->hw;
1845 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001846 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001847	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001848 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301849
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001850 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301851
Felix Fietkau827e69b2009-11-15 23:09:25 +01001852 if (aphy)
1853 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301854
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301855 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301856 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301857
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301858 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301859 /* Frame was ACKed */
1860 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1861 }
1862
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001863 padpos = ath9k_cmn_padpos(hdr->frame_control);
1864 padsize = padpos & 3;
	1865	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301866 /*
1867 * Remove MAC header padding before giving the frame back to
1868 * mac80211.
1869 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001870 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301871 skb_pull(skb, padsize);
1872 }
1873
Sujith1b04b932010-01-08 10:36:05 +05301874 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1875 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001876 ath_print(common, ATH_DBG_PS,
1877 "Going back to sleep after having "
Pavel Roskinf643e512010-01-29 17:22:12 -05001878 "received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301879 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1880 PS_WAIT_FOR_CAB |
1881 PS_WAIT_FOR_PSPOLL_DATA |
1882 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001883 }
1884
Felix Fietkau827e69b2009-11-15 23:09:25 +01001885 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
Jouni Malinenf0ed85c2009-03-03 19:23:31 +02001886 ath9k_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001887 else {
1888 q = skb_get_queue_mapping(skb);
1889 if (q >= 4)
1890 q = 0;
1891
1892 if (--sc->tx.pending_frames[q] < 0)
1893 sc->tx.pending_frames[q] = 0;
1894
Felix Fietkau827e69b2009-11-15 23:09:25 +01001895 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001896 }
Sujithe8324352009-01-16 21:38:42 +05301897}
1898
1899static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001900 struct ath_txq *txq, struct list_head *bf_q,
1901 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301902{
1903 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301904 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301905 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301906
Sujithe8324352009-01-16 21:38:42 +05301907 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301908 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301909
1910 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301911 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301912
1913 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301914 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301915 }
1916
1917 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001918
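	/*
	 * PAPRD training frames are internal to the driver: signal the
	 * waiting calibration code, or simply free the skb if the
	 * calibration has already timed out waiting for this completion.
	 */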
1919 if (bf->bf_state.bfs_paprd) {
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001920 if (time_after(jiffies,
1921 bf->bf_state.bfs_paprd_timestamp +
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001922 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001923 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001924 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001925 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001926 } else {
1927 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1928 ath_debug_stat_tx(sc, txq, bf, ts);
1929 }
Sujithe8324352009-01-16 21:38:42 +05301930
1931 /*
	1932	 * Return this MPDU's list of ath_buf entries to the free buffer queue
1933 */
1934 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1935 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1936 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1937}
1938
1939static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001940 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301941{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001942 u16 seq_st = 0;
1943 u32 ba[WME_BA_BMP_SIZE >> 5];
Sujithe8324352009-01-16 21:38:42 +05301944 int ba_index;
1945 int nbad = 0;
1946 int isaggr = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001947
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001948 if (bf->bf_lastbf->bf_tx_aborted)
Sujithe8324352009-01-16 21:38:42 +05301949 return 0;
Sujith528f0c62008-10-29 10:14:26 +05301950
Sujithcd3d39a2008-08-11 14:03:34 +05301951 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001952 if (isaggr) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001953 seq_st = ts->ts_seqnum;
1954 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001955 }
1956
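	/*
	 * Walk every subframe of this (possibly aggregated) buffer chain
	 * and count the ones that are not marked in the block-ack bitmap.
	 */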
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001957 while (bf) {
Sujithe8324352009-01-16 21:38:42 +05301958 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1959 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1960 nbad++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001961
Sujithe8324352009-01-16 21:38:42 +05301962 bf = bf->bf_next;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001963 }
1964
Sujithe8324352009-01-16 21:38:42 +05301965 return nbad;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001966}
1967
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001968static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301969 int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301970{
Sujitha22be222009-03-30 15:28:36 +05301971 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301972 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301973 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001974 struct ieee80211_hw *hw = bf->aphy->hw;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301975 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301976
Sujith95e4acb2009-03-13 08:56:09 +05301977 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001978 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301979
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001980 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301981 WARN_ON(tx_rateindex >= hw->max_rates);
1982
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001983 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301984 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001985 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001986 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301987
Björn Smedmanebd02282010-10-10 22:44:39 +02001988 BUG_ON(nbad > bf->bf_nframes);
1989
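		/*
		 * Report the A-MPDU length and the number of acked
		 * subframes so that rate control can account for
		 * per-subframe losses.
		 */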
1990 tx_info->status.ampdu_len = bf->bf_nframes;
1991 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
1992 }
1993
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001994 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301995 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Sujith254ad0f2009-02-04 08:10:19 +05301996 if (ieee80211_is_data(hdr->frame_control)) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001997 if (ts->ts_flags &
Felix Fietkau827e69b2009-11-15 23:09:25 +01001998 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
1999 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002000 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2001 (ts->ts_status & ATH9K_TXERR_FIFO))
Felix Fietkau827e69b2009-11-15 23:09:25 +01002002 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
Sujithc4288392008-11-18 09:09:30 +05302003 }
2004 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302005
Felix Fietkau545750d2009-11-23 22:21:01 +01002006 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302007 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002008 tx_info->status.rates[i].idx = -1;
2009 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302010
Felix Fietkau78c46532010-06-25 01:26:16 +02002011 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302012}
2013
Sujith059d8062009-01-16 21:38:49 +05302014static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2015{
2016 int qnum;
2017
Felix Fietkau97923b12010-06-12 00:33:55 -04002018 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2019 if (qnum == -1)
2020 return;
2021
Sujith059d8062009-01-16 21:38:49 +05302022 spin_lock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002023 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07002024 if (ath_mac80211_start_queue(sc, qnum))
2025 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05302026 }
2027 spin_unlock_bh(&txq->axq_lock);
2028}
2029
Sujithc4288392008-11-18 09:09:30 +05302030static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002031{
Sujithcbe61d82009-02-09 13:27:12 +05302032 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002033 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002034 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2035 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302036 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002037 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302038 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002039 int status;
2040
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002041 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2042 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2043 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002044
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002045 for (;;) {
2046 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002047 if (list_empty(&txq->axq_q)) {
2048 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002049 spin_unlock_bh(&txq->axq_lock);
2050 break;
2051 }
2052 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2053
2054 /*
	2055		 * There is a race condition in which a BH gets scheduled
	2056		 * after software writes TxE and before the hardware re-loads
	2057		 * the last descriptor to pick up the newly chained one.
	2058		 * Software must therefore keep the last DONE descriptor as a
	2059		 * holding descriptor, which it does by marking it with the
	2060		 * STALE flag.
2061 */
2062 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302063 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002064 bf_held = bf;
2065 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302066 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002067 break;
2068 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002069 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302070 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002071 }
2072 }
2073
2074 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302075 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002076
Felix Fietkau29bffa92010-03-29 20:14:23 -07002077 memset(&ts, 0, sizeof(ts));
2078 status = ath9k_hw_txprocdesc(ah, ds, &ts);
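		/*
		 * -EINPROGRESS means the hardware has not finished with
		 * this descriptor yet; leave it on the queue and wait
		 * for the next TX interrupt.
		 */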
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002079 if (status == -EINPROGRESS) {
2080 spin_unlock_bh(&txq->axq_lock);
2081 break;
2082 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002083
2084 /*
2085 * Remove ath_buf's of the same transmit unit from txq,
2086 * however leave the last descriptor back as the holding
2087 * descriptor for hw.
2088 */
Sujitha119cc42009-03-30 15:28:38 +05302089 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002090 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002091 if (!list_is_singular(&lastbf->list))
2092 list_cut_position(&bf_head,
2093 &txq->axq_q, lastbf->list.prev);
2094
2095 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002096 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002097 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002098 if (bf_held)
2099 list_del(&bf_held->list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002100 spin_unlock_bh(&txq->axq_lock);
2101
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002102 if (bf_held)
2103 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002104
Sujithcd3d39a2008-08-11 14:03:34 +05302105 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002106 /*
	2107			 * This frame was sent out as a single (non-aggregated)
	2108			 * frame, so use the hardware retry status directly.
2109 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002110 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302111 bf->bf_state.bf_type |= BUF_XRETRY;
Björn Smedmanebd02282010-10-10 22:44:39 +02002112 ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002113 }
Johannes Berge6a98542008-10-21 12:40:02 +02002114
Sujithcd3d39a2008-08-11 14:03:34 +05302115 if (bf_isampdu(bf))
Felix Fietkau29bffa92010-03-29 20:14:23 -07002116 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002117 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002118 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002119
Sujith059d8062009-01-16 21:38:49 +05302120 ath_wake_mac80211_queue(sc, txq);
2121
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002122 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302123 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002124 ath_txq_schedule(sc, txq);
2125 spin_unlock_bh(&txq->axq_lock);
2126 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002127}
2128
Sujith305fe472009-07-23 15:32:29 +05302129static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002130{
2131 struct ath_softc *sc = container_of(work, struct ath_softc,
2132 tx_complete_work.work);
2133 struct ath_txq *txq;
2134 int i;
2135 bool needreset = false;
2136
2137 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2138 if (ATH_TXQ_SETUP(sc, i)) {
2139 txq = &sc->tx.txq[i];
2140 spin_lock_bh(&txq->axq_lock);
2141 if (txq->axq_depth) {
2142 if (txq->axq_tx_inprogress) {
2143 needreset = true;
2144 spin_unlock_bh(&txq->axq_lock);
2145 break;
2146 } else {
2147 txq->axq_tx_inprogress = true;
2148 }
2149 }
2150 spin_unlock_bh(&txq->axq_lock);
2151 }
2152
2153 if (needreset) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002154 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2155 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302156 ath9k_ps_wakeup(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002157 ath_reset(sc, false);
Sujith332c5562009-10-09 09:51:28 +05302158 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002159 }
2160
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002161 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002162 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2163}
2164
2165
Sujithe8324352009-01-16 21:38:42 +05302166
2167void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002168{
Sujithe8324352009-01-16 21:38:42 +05302169 int i;
2170 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002171
Sujithe8324352009-01-16 21:38:42 +05302172 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002173
2174 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302175 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2176 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177 }
2178}
2179
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002180void ath_tx_edma_tasklet(struct ath_softc *sc)
2181{
2182 struct ath_tx_status txs;
2183 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2184 struct ath_hw *ah = sc->sc_ah;
2185 struct ath_txq *txq;
2186 struct ath_buf *bf, *lastbf;
2187 struct list_head bf_head;
2188 int status;
2189 int txok;
2190
2191 for (;;) {
2192 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2193 if (status == -EINPROGRESS)
2194 break;
2195 if (status == -EIO) {
2196 ath_print(common, ATH_DBG_XMIT,
2197 "Error processing tx status\n");
2198 break;
2199 }
2200
2201 /* Skip beacon completions */
2202 if (txs.qid == sc->beacon.beaconq)
2203 continue;
2204
2205 txq = &sc->tx.txq[txs.qid];
2206
2207 spin_lock_bh(&txq->axq_lock);
2208 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2209 spin_unlock_bh(&txq->axq_lock);
2210 return;
2211 }
2212
2213 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2214 struct ath_buf, list);
2215 lastbf = bf->bf_lastbf;
2216
2217 INIT_LIST_HEAD(&bf_head);
2218 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2219 &lastbf->list);
2220 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2221 txq->axq_depth--;
2222 txq->axq_tx_inprogress = false;
2223 spin_unlock_bh(&txq->axq_lock);
2224
2225 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2226
2227 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002228 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2229 bf->bf_state.bf_type |= BUF_XRETRY;
Björn Smedmanebd02282010-10-10 22:44:39 +02002230 ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002231 }
2232
2233 if (bf_isampdu(bf))
2234 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2235 else
2236 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2237 &txs, txok, 0);
2238
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002239 ath_wake_mac80211_queue(sc, txq);
2240
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002241 spin_lock_bh(&txq->axq_lock);
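		/*
		 * A TX FIFO slot has just been freed; push the next pending
		 * buffer chain, or let the scheduler pick a TID if nothing
		 * is waiting.
		 */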
2242 if (!list_empty(&txq->txq_fifo_pending)) {
2243 INIT_LIST_HEAD(&bf_head);
2244 bf = list_first_entry(&txq->txq_fifo_pending,
2245 struct ath_buf, list);
2246 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2247 &bf->bf_lastbf->list);
2248 ath_tx_txqaddbuf(sc, txq, &bf_head);
2249 } else if (sc->sc_flags & SC_OP_TXAGGR)
2250 ath_txq_schedule(sc, txq);
2251 spin_unlock_bh(&txq->axq_lock);
2252 }
2253}
2254
Sujithe8324352009-01-16 21:38:42 +05302255/*****************/
2256/* Init, Cleanup */
2257/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002258
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002259static int ath_txstatus_setup(struct ath_softc *sc, int size)
2260{
2261 struct ath_descdma *dd = &sc->txsdma;
2262 u8 txs_len = sc->sc_ah->caps.txs_len;
2263
2264 dd->dd_desc_len = size * txs_len;
2265 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2266 &dd->dd_desc_paddr, GFP_KERNEL);
2267 if (!dd->dd_desc)
2268 return -ENOMEM;
2269
2270 return 0;
2271}
2272
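/*
 * EDMA (AR9003-family) hardware reports TX status through a dedicated
 * status ring instead of writing it back into the frame descriptors,
 * so a separate DMA ring is allocated and registered with the hardware.
 */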
2273static int ath_tx_edma_init(struct ath_softc *sc)
2274{
2275 int err;
2276
2277 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2278 if (!err)
2279 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2280 sc->txsdma.dd_desc_paddr,
2281 ATH_TXSTATUS_RING_SIZE);
2282
2283 return err;
2284}
2285
2286static void ath_tx_edma_cleanup(struct ath_softc *sc)
2287{
2288 struct ath_descdma *dd = &sc->txsdma;
2289
2290 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2291 dd->dd_desc_paddr);
2292}
2293
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002294int ath_tx_init(struct ath_softc *sc, int nbufs)
2295{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002296 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002297 int error = 0;
2298
Sujith797fe5cb2009-03-30 15:28:45 +05302299 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002300
Sujith797fe5cb2009-03-30 15:28:45 +05302301 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002302 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302303 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002304 ath_print(common, ATH_DBG_FATAL,
2305 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302306 goto err;
2307 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002308
Sujith797fe5cb2009-03-30 15:28:45 +05302309 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002310 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302311 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002312 ath_print(common, ATH_DBG_FATAL,
2313 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302314 goto err;
2315 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002316
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002317 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2318
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002319 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2320 error = ath_tx_edma_init(sc);
2321 if (error)
2322 goto err;
2323 }
2324
Sujith797fe5cb2009-03-30 15:28:45 +05302325err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002326 if (error != 0)
2327 ath_tx_cleanup(sc);
2328
2329 return error;
2330}
2331
Sujith797fe5cb2009-03-30 15:28:45 +05302332void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002333{
Sujithb77f4832008-12-07 21:44:03 +05302334 if (sc->beacon.bdma.dd_desc_len != 0)
2335 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002336
Sujithb77f4832008-12-07 21:44:03 +05302337 if (sc->tx.txdma.dd_desc_len != 0)
2338 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002339
2340 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2341 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002342}
2343
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002344void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2345{
Sujithc5170162008-10-29 10:13:59 +05302346 struct ath_atx_tid *tid;
2347 struct ath_atx_ac *ac;
2348 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002349
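	/*
	 * Reset the per-TID sequence numbers, BAW tracking and aggregation
	 * state, and attach each TID to its WME access category.
	 */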
Sujith8ee5afb2008-12-07 21:43:36 +05302350 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302351 tidno < WME_NUM_TID;
2352 tidno++, tid++) {
2353 tid->an = an;
2354 tid->tidno = tidno;
2355 tid->seq_start = tid->seq_next = 0;
2356 tid->baw_size = WME_MAX_BA;
2357 tid->baw_head = tid->baw_tail = 0;
2358 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302359 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302360 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302361 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302362 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302363 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302364 tid->state &= ~AGGR_ADDBA_COMPLETE;
2365 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302366 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002367
Sujith8ee5afb2008-12-07 21:43:36 +05302368 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302369 acno < WME_NUM_AC; acno++, ac++) {
2370 ac->sched = false;
Felix Fietkau1d2231e2010-06-12 00:33:51 -04002371 ac->qnum = sc->tx.hwq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302372 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002373 }
2374}
2375
Sujithb5aa9bf2008-10-29 10:13:31 +05302376void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002377{
Felix Fietkau2b409942010-07-07 19:42:08 +02002378 struct ath_atx_ac *ac;
2379 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002380 struct ath_txq *txq;
Felix Fietkau2b409942010-07-07 19:42:08 +02002381 int i, tidno;
Sujithe8324352009-01-16 21:38:42 +05302382
Felix Fietkau2b409942010-07-07 19:42:08 +02002383 for (tidno = 0, tid = &an->tid[tidno];
2384 tidno < WME_NUM_TID; tidno++, tid++) {
2385 i = tid->ac->qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002386
Felix Fietkau2b409942010-07-07 19:42:08 +02002387 if (!ATH_TXQ_SETUP(sc, i))
2388 continue;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002389
Felix Fietkau2b409942010-07-07 19:42:08 +02002390 txq = &sc->tx.txq[i];
2391 ac = tid->ac;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002392
Felix Fietkau2b409942010-07-07 19:42:08 +02002393 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002394
Felix Fietkau2b409942010-07-07 19:42:08 +02002395 if (tid->sched) {
2396 list_del(&tid->list);
2397 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002398 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002399
2400 if (ac->sched) {
2401 list_del(&ac->list);
2402 tid->ac->sched = false;
2403 }
2404
2405 ath_tid_drain(sc, txq, tid);
2406 tid->state &= ~AGGR_ADDBA_COMPLETE;
2407 tid->state &= ~AGGR_CLEANUP;
2408
2409 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002410 }
2411}