/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

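/*
 * Schedule a TID for transmission: add it to its access category's list
 * and, if needed, add the AC to the hardware queue's scheduling list.
 * Paused or already-scheduled TIDs are left alone.
 */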
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf)) {
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
		}
	}

	spin_unlock_bh(&txq->axq_lock);
}

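/*
 * Mark a subframe's sequence number as completed in the block-ack window
 * bitmap and slide the window start past any consecutive completed entries.
 */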
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

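/*
 * Add a subframe to the block-ack window: set its bit in the tx_buf bitmap
 * and extend the window tail if the new entry lies beyond it.
 */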
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

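/*
 * Get a fresh ath_buf from the free list and copy over the descriptor
 * contents and state of an existing buffer, so a stale (holding)
 * descriptor can be retransmitted without touching the original.
 */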
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

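/*
 * Handle completion of an aggregate: walk the chain of subframes, compare
 * each sequence number against the block-ack bitmap reported by hardware,
 * complete the acknowledged subframes and requeue the failed ones for
 * software retry (or drop them once the retry limit is reached).
 */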
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	int nframes;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));
	nframes = bf->bf_nframes;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				bf->bf_nframes = nframes;
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

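/*
 * Returns the aggregate size limit in bytes for this buffer's rate series,
 * or 0 if aggregation should be avoided (legacy rate or probe rate).
 */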
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

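/*
 * Assemble an A-MPDU from the TID's software queue: pull frames while the
 * block-ack window, the aggregate byte limit and the subframe count allow,
 * link their descriptors together and move them onto bf_q.
 */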
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

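/*
 * Drain the TID's software queue into the hardware queue, forming
 * aggregates (or single frames) until the queue is empty, the hardware
 * queue is deep enough, or the block-ack window closes.
 */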
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

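/*
 * Pick the next access category queued on this hardware queue (round-robin)
 * and let its first unpaused TID transmit; the AC is re-queued at the tail
 * if it still has pending TIDs.
 */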
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

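/*
 * Queue an MPDU for aggregate transmission: if the TID already has queued
 * frames, is paused, the sequence number falls outside the BAW, or the
 * hardware queue is deep enough, the frame stays in the software queue;
 * otherwise it is added to the BAW and sent directly.
 */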
Sujithe8324352009-01-16 21:38:42 +05301295static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1296 struct list_head *bf_head,
1297 struct ath_tx_control *txctl)
1298{
1299 struct ath_buf *bf;
1300
Sujithe8324352009-01-16 21:38:42 +05301301 bf = list_first_entry(bf_head, struct ath_buf, list);
1302 bf->bf_state.bf_type |= BUF_AMPDU;
Sujithfec247c2009-07-27 12:08:16 +05301303 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
Sujithe8324352009-01-16 21:38:42 +05301304
1305 /*
1306 * Do not queue to h/w when any of the following conditions is true:
1307 * - there are pending frames in software queue
1308 * - the TID is currently paused for ADDBA/BAR request
1309 * - seqno is not within block-ack window
1310 * - h/w queue depth exceeds low water mark
1311 */
1312 if (!list_empty(&tid->buf_q) || tid->paused ||
1313 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1314 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001315 /*
Sujithe8324352009-01-16 21:38:42 +05301316 * Add this frame to software queue for scheduling later
1317 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001318 */
Sujithd43f30152009-01-16 21:38:53 +05301319 list_move_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301320 ath_tx_queue_tid(txctl->txq, tid);
1321 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001322 }
1323
Sujithe8324352009-01-16 21:38:42 +05301324 /* Add sub-frame to BAW */
1325 ath_tx_addto_baw(sc, tid, bf);
1326
1327 /* Queue to h/w without aggregation */
1328 bf->bf_nframes = 1;
Sujithd43f30152009-01-16 21:38:53 +05301329 bf->bf_lastbf = bf;
Sujithe8324352009-01-16 21:38:42 +05301330 ath_buf_set_rate(sc, bf);
1331 ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
Sujithc4288392008-11-18 09:09:30 +05301332}
1333
Sujithc37452b2009-03-09 09:31:57 +05301334static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
1335 struct ath_atx_tid *tid,
1336 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001337{
Sujithe8324352009-01-16 21:38:42 +05301338 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001339
Sujithe8324352009-01-16 21:38:42 +05301340 bf = list_first_entry(bf_head, struct ath_buf, list);
1341 bf->bf_state.bf_type &= ~BUF_AMPDU;
1342
1343 /* update starting sequence number for subsequent ADDBA request */
1344 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1345
1346 bf->bf_nframes = 1;
Sujithd43f30152009-01-16 21:38:53 +05301347 bf->bf_lastbf = bf;
Sujithe8324352009-01-16 21:38:42 +05301348 ath_buf_set_rate(sc, bf);
1349 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301350 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001351}
1352
Sujithc37452b2009-03-09 09:31:57 +05301353static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1354 struct list_head *bf_head)
1355{
1356 struct ath_buf *bf;
1357
1358 bf = list_first_entry(bf_head, struct ath_buf, list);
1359
1360 bf->bf_lastbf = bf;
1361 bf->bf_nframes = 1;
1362 ath_buf_set_rate(sc, bf);
1363 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301364 TX_STAT_INC(txq->axq_qnum, queued);
Sujithc37452b2009-03-09 09:31:57 +05301365}
1366
Sujith528f0c62008-10-29 10:14:26 +05301367static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001368{
Sujith528f0c62008-10-29 10:14:26 +05301369 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001370 enum ath9k_pkt_type htype;
1371 __le16 fc;
1372
Sujith528f0c62008-10-29 10:14:26 +05301373 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001374 fc = hdr->frame_control;
1375
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001376 if (ieee80211_is_beacon(fc))
1377 htype = ATH9K_PKT_TYPE_BEACON;
1378 else if (ieee80211_is_probe_resp(fc))
1379 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1380 else if (ieee80211_is_atim(fc))
1381 htype = ATH9K_PKT_TYPE_ATIM;
1382 else if (ieee80211_is_pspoll(fc))
1383 htype = ATH9K_PKT_TYPE_PSPOLL;
1384 else
1385 htype = ATH9K_PKT_TYPE_NORMAL;
1386
1387 return htype;
1388}
1389
Sujith528f0c62008-10-29 10:14:26 +05301390static void assign_aggr_tid_seqno(struct sk_buff *skb,
1391 struct ath_buf *bf)
1392{
1393 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1394 struct ieee80211_hdr *hdr;
1395 struct ath_node *an;
1396 struct ath_atx_tid *tid;
1397 __le16 fc;
1398 u8 *qc;
1399
1400 if (!tx_info->control.sta)
1401 return;
1402
1403 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1404 hdr = (struct ieee80211_hdr *)skb->data;
1405 fc = hdr->frame_control;
1406
Sujith528f0c62008-10-29 10:14:26 +05301407 if (ieee80211_is_data_qos(fc)) {
1408 qc = ieee80211_get_qos_ctl(hdr);
1409 bf->bf_tidno = qc[0] & 0xf;
Sujith98deeea2008-08-11 14:05:46 +05301410 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001411
Sujithe8324352009-01-16 21:38:42 +05301412 /*
1413 * For HT capable stations, we save tidno for later use.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301414 * We also override seqno set by upper layer with the one
1415 * in tx aggregation state.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301416 */
1417 tid = ATH_AN_2_TID(an, bf->bf_tidno);
Sujith17b182e2009-12-14 14:56:56 +05301418 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301419 bf->bf_seqno = tid->seq_next;
1420 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
Sujith528f0c62008-10-29 10:14:26 +05301421}
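/*
 * Worked example (illustrative values): if tid->seq_next is 100 when a
 * QoS data frame arrives here, the frame goes out with
 * seq_ctrl = 100 << IEEE80211_SEQ_SEQ_SHIFT (0x0640) and seq_next
 * advances to 101, wrapping modulo IEEE80211_SEQ_MAX; bf->bf_seqno
 * keeps the unshifted value so the BAW bookkeeping can compare it
 * against tid->seq_start later.
 */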
1422
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001423static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
Sujith528f0c62008-10-29 10:14:26 +05301424{
1425 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1426 int flags = 0;
1427
1428 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1429 flags |= ATH9K_TXDESC_INTREQ;
1430
1431 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1432 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301433
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001434 if (use_ldpc)
1435 flags |= ATH9K_TXDESC_LDPC;
1436
Sujith528f0c62008-10-29 10:14:26 +05301437 return flags;
1438}
1439
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001440/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001441 * rix - rate index
1442 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1443 * width - 0 for 20 MHz, 1 for 40 MHz
1444	 * half_gi - whether to use the 3.6 us (short GI) symbol time instead of 4 us
1445 */
Sujith102e0572008-10-29 10:15:16 +05301446static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1447 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001448{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001449 u32 nbits, nsymbits, duration, nsymbols;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001450 int streams, pktlen;
1451
Sujithcd3d39a2008-08-11 14:03:34 +05301452 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +05301453
1454 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001455 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001456 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001457 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001458 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1459
1460 if (!half_gi)
1461 duration = SYMBOL_TIME(nsymbols);
1462 else
1463 duration = SYMBOL_TIME_HALFGI(nsymbols);
1464
Sujithe63835b2008-11-18 09:07:53 +05301465	/* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001466 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301467
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001468 return duration;
1469}
1470
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001471static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1472{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001473 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001474 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301475 struct sk_buff *skb;
1476 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301477 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001478 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301479 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301480 int i, flags = 0;
1481 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301482 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301483
1484 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301485
Sujitha22be222009-03-30 15:28:36 +05301486 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301487 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301488 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301489 hdr = (struct ieee80211_hdr *)skb->data;
1490 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301491
Sujithc89424d2009-01-30 14:29:28 +05301492 /*
1493 * We check if Short Preamble is needed for the CTS rate by
1494 * checking the BSS's global flag.
1495 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1496 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001497 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1498 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301499 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001500 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001501
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001502 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001503 bool is_40, is_sgi, is_sp;
1504 int phy;
1505
Sujithe63835b2008-11-18 09:07:53 +05301506 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001507 continue;
1508
Sujitha8efee42008-11-18 09:07:30 +05301509 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301510 series[i].Tries = rates[i].count;
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001511 series[i].ChSel = common->tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001512
Felix Fietkau27032052010-01-17 21:08:50 +01001513 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1514 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301515 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001516 flags |= ATH9K_TXDESC_RTSENA;
1517 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1518 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1519 flags |= ATH9K_TXDESC_CTSENA;
1520 }
1521
Sujithc89424d2009-01-30 14:29:28 +05301522 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1523 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1524 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1525 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001526
Felix Fietkau545750d2009-11-23 22:21:01 +01001527 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1528 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1529 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1530
1531 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1532 /* MCS rates */
1533 series[i].Rate = rix | 0x80;
1534 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1535 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001536 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1537 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001538 continue;
1539 }
1540
1541		/* legacy rates */
1542		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1543		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1544		    !(rate->flags & IEEE80211_RATE_ERP_G))
1545			phy = WLAN_RC_PHY_CCK;
1546		else
1547			phy = WLAN_RC_PHY_OFDM;
1548
1549		series[i].Rate = rate->hw_value;
1550 if (rate->hw_value_short) {
1551 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1552 series[i].Rate |= rate->hw_value_short;
1553 } else {
1554 is_sp = false;
1555 }
1556
1557 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1558 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001559 }
1560
Felix Fietkau27032052010-01-17 21:08:50 +01001561 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1562 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1563 flags &= ~ATH9K_TXDESC_RTSENA;
1564
1565 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1566 if (flags & ATH9K_TXDESC_RTSENA)
1567 flags &= ~ATH9K_TXDESC_CTSENA;
1568
Sujithe63835b2008-11-18 09:07:53 +05301569 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301570 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1571 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301572 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301573 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301574
Sujith17d79042009-02-09 13:27:03 +05301575 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301576 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001577}
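/*
 * Usage note (rough sketch of the MRR scheme): the four entries of
 * series[] form the hardware multi-rate-retry chain; series[0] is
 * attempted rates[0].count times before the hardware falls back to
 * series[1] and so on, with the RTS/CTS, 20/40 MHz and half-GI flags
 * applied per series rather than per frame.
 */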
1578
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001579static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
Sujithe8324352009-01-16 21:38:42 +05301580 struct sk_buff *skb,
1581 struct ath_tx_control *txctl)
1582{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001583 struct ath_wiphy *aphy = hw->priv;
1584 struct ath_softc *sc = aphy->sc;
Sujithe8324352009-01-16 21:38:42 +05301585 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1586 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301587 int hdrlen;
1588 __le16 fc;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001589 int padpos, padsize;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001590 bool use_ldpc = false;
Sujithe8324352009-01-16 21:38:42 +05301591
Felix Fietkau827e69b2009-11-15 23:09:25 +01001592 tx_info->pad[0] = 0;
1593 switch (txctl->frame_type) {
Pavel Roskinc81494d2010-03-31 18:05:25 -04001594 case ATH9K_IFT_NOT_INTERNAL:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001595 break;
Pavel Roskinc81494d2010-03-31 18:05:25 -04001596 case ATH9K_IFT_PAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001597 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1598 /* fall through */
Pavel Roskinc81494d2010-03-31 18:05:25 -04001599 case ATH9K_IFT_UNPAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001600 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1601 break;
1602 }
Sujithe8324352009-01-16 21:38:42 +05301603 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1604 fc = hdr->frame_control;
1605
1606 ATH_TXBUF_RESET(bf);
1607
Felix Fietkau827e69b2009-11-15 23:09:25 +01001608 bf->aphy = aphy;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001609 bf->bf_frmlen = skb->len + FCS_LEN;
1610 /* Remove the padding size from bf_frmlen, if any */
1611 padpos = ath9k_cmn_padpos(hdr->frame_control);
1612 padsize = padpos & 3;
1613	if (padsize && skb->len > padpos + padsize) {
1614 bf->bf_frmlen -= padsize;
1615 }
Sujithe8324352009-01-16 21:38:42 +05301616
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001617 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
Sujithc656bbb2009-01-16 21:38:56 +05301618 bf->bf_state.bf_type |= BUF_HT;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001619 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1620 use_ldpc = true;
1621 }
Sujithe8324352009-01-16 21:38:42 +05301622
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001623 bf->bf_state.bfs_paprd = txctl->paprd;
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001624 if (txctl->paprd)
1625 bf->bf_state.bfs_paprd_timestamp = jiffies;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001626 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
Sujithe8324352009-01-16 21:38:42 +05301627
Luis R. Rodriguezc17512d2010-08-05 17:56:54 -04001628 bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujithe8324352009-01-16 21:38:42 +05301629 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1630 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1631 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1632 } else {
1633 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1634 }
1635
Sujith17b182e2009-12-14 14:56:56 +05301636 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1637 (sc->sc_flags & SC_OP_TXAGGR))
Sujithe8324352009-01-16 21:38:42 +05301638 assign_aggr_tid_seqno(skb, bf);
1639
1640 bf->bf_mpdu = skb;
1641
Ben Greearc1739eb2010-10-14 12:45:29 -07001642 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1643 skb->len, DMA_TO_DEVICE);
1644 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301645 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001646 bf->bf_buf_addr = 0;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001647 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1648 "dma_mapping_error() on TX\n");
Sujithe8324352009-01-16 21:38:42 +05301649 return -ENOMEM;
1650 }
1651
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001652 bf->bf_tx_aborted = false;
1653
Sujithe8324352009-01-16 21:38:42 +05301654 return 0;
1655}
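/*
 * Padding example (illustrative): ath9k_cmn_padpos() returns the
 * 802.11 header length at which alignment padding was inserted, e.g.
 * 26 bytes for a QoS data frame, so padsize = 26 & 3 = 2. Those two
 * pad bytes exist only to 4-byte align the payload in the skb and are
 * not transmitted, which is why they are subtracted from bf_frmlen.
 */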
1656
1657/* FIXME: tx power */
1658static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1659 struct ath_tx_control *txctl)
1660{
Sujitha22be222009-03-30 15:28:36 +05301661 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301662 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithc37452b2009-03-09 09:31:57 +05301663 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301664 struct ath_node *an = NULL;
1665 struct list_head bf_head;
1666 struct ath_desc *ds;
1667 struct ath_atx_tid *tid;
Sujithcbe61d82009-02-09 13:27:12 +05301668 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301669 int frm_type;
Sujithc37452b2009-03-09 09:31:57 +05301670 __le16 fc;
Sujithe8324352009-01-16 21:38:42 +05301671
1672 frm_type = get_hw_packet_type(skb);
Sujithc37452b2009-03-09 09:31:57 +05301673 fc = hdr->frame_control;
Sujithe8324352009-01-16 21:38:42 +05301674
1675 INIT_LIST_HEAD(&bf_head);
1676 list_add_tail(&bf->list, &bf_head);
1677
1678 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001679 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301680
1681 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1682 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1683
1684 ath9k_hw_filltxdesc(ah, ds,
1685 skb->len, /* segment length */
1686 true, /* first segment */
1687 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001688 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001689 bf->bf_buf_addr,
1690 txctl->txq->axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301691
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001692 if (bf->bf_state.bfs_paprd)
1693 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1694
Sujithe8324352009-01-16 21:38:42 +05301695 spin_lock_bh(&txctl->txq->axq_lock);
1696
1697 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1698 tx_info->control.sta) {
1699 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1700 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1701
Sujithc37452b2009-03-09 09:31:57 +05301702 if (!ieee80211_is_data_qos(fc)) {
1703 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1704 goto tx_done;
1705 }
1706
Felix Fietkau4fdec032010-03-12 04:02:43 +01001707 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Sujithe8324352009-01-16 21:38:42 +05301708 /*
1709 * Try aggregation if it's a unicast data frame
1710 * and the destination is HT capable.
1711 */
1712 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1713 } else {
1714 /*
1715 * Send this frame as regular when ADDBA
1716 * exchange is neither complete nor pending.
1717 */
Sujithc37452b2009-03-09 09:31:57 +05301718 ath_tx_send_ht_normal(sc, txctl->txq,
1719 tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301720 }
1721 } else {
Sujithc37452b2009-03-09 09:31:57 +05301722 ath_tx_send_normal(sc, txctl->txq, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301723 }
1724
Sujithc37452b2009-03-09 09:31:57 +05301725tx_done:
Sujithe8324352009-01-16 21:38:42 +05301726 spin_unlock_bh(&txctl->txq->axq_lock);
1727}
1728
1729/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001730int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301731 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001732{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001733 struct ath_wiphy *aphy = hw->priv;
1734 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001735 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau84642d62010-06-01 21:33:13 +02001736 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001737 struct ath_buf *bf;
Felix Fietkau97923b12010-06-12 00:33:55 -04001738 int q, r;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001739
Sujithe8324352009-01-16 21:38:42 +05301740 bf = ath_tx_get_buffer(sc);
1741 if (!bf) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001742 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
Sujithe8324352009-01-16 21:38:42 +05301743 return -1;
1744 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001745
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001746 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301747 if (unlikely(r)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001748 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001749
Sujithe8324352009-01-16 21:38:42 +05301750		/* upon ath_tx_processq() this TX queue will be resumed; we
1751		 * guarantee this will happen by knowing beforehand that
1752		 * we will at least have to run TX completion on one buffer
1753		 * on the queue */
1754 spin_lock_bh(&txq->axq_lock);
Felix Fietkau84642d62010-06-01 21:33:13 +02001755 if (!txq->stopped && txq->axq_depth > 1) {
Luis R. Rodriguezf52de032009-11-02 17:09:12 -08001756 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
Sujithe8324352009-01-16 21:38:42 +05301757 txq->stopped = 1;
1758 }
1759 spin_unlock_bh(&txq->axq_lock);
1760
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001761 ath_tx_return_buffer(sc, bf);
Sujithe8324352009-01-16 21:38:42 +05301762
1763 return r;
1764 }
1765
Felix Fietkau97923b12010-06-12 00:33:55 -04001766 q = skb_get_queue_mapping(skb);
1767 if (q >= 4)
1768 q = 0;
1769
1770 spin_lock_bh(&txq->axq_lock);
1771 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1772 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1773 txq->stopped = 1;
1774 }
1775 spin_unlock_bh(&txq->axq_lock);
1776
Sujithe8324352009-01-16 21:38:42 +05301777 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001778
1779 return 0;
1780}
1781
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001782void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001783{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001784 struct ath_wiphy *aphy = hw->priv;
1785 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001786 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001787 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1788 int padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301789 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1790 struct ath_tx_control txctl;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001791
Sujithe8324352009-01-16 21:38:42 +05301792 memset(&txctl, 0, sizeof(struct ath_tx_control));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001793
Sujithe8324352009-01-16 21:38:42 +05301794 /*
1795 * As a temporary workaround, assign seq# here; this will likely need
1796 * to be cleaned up to work better with Beacon transmission and virtual
1797 * BSSes.
1798 */
1799 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
Sujithe8324352009-01-16 21:38:42 +05301800 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1801 sc->tx.seq_no += 0x10;
1802 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1803 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001804 }
1805
Sujithe8324352009-01-16 21:38:42 +05301806 /* Add the padding after the header if this is not already done */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001807 padpos = ath9k_cmn_padpos(hdr->frame_control);
1808 padsize = padpos & 3;
1809	if (padsize && skb->len > padpos) {
Sujithe8324352009-01-16 21:38:42 +05301810 if (skb_headroom(skb) < padsize) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001811 ath_print(common, ATH_DBG_XMIT,
1812 "TX CABQ padding failed\n");
Sujithe8324352009-01-16 21:38:42 +05301813 dev_kfree_skb_any(skb);
1814 return;
1815 }
1816 skb_push(skb, padsize);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001817 memmove(skb->data, skb->data + padsize, padpos);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001818 }
1819
Sujithe8324352009-01-16 21:38:42 +05301820 txctl.txq = sc->beacon.cabq;
1821
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001822 ath_print(common, ATH_DBG_XMIT,
1823 "transmitting CABQ packet, skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301824
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001825 if (ath_tx_start(hw, skb, &txctl) != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001826 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
Sujithe8324352009-01-16 21:38:42 +05301827 goto exit;
1828 }
1829
1830 return;
1831exit:
1832 dev_kfree_skb_any(skb);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001833}
1834
Sujithe8324352009-01-16 21:38:42 +05301835/*****************/
1836/* TX Completion */
1837/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001838
Sujithe8324352009-01-16 21:38:42 +05301839static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau827e69b2009-11-15 23:09:25 +01001840 struct ath_wiphy *aphy, int tx_flags)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001841{
Sujithe8324352009-01-16 21:38:42 +05301842 struct ieee80211_hw *hw = sc->hw;
1843 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001844 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001845 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001846 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301847
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001848 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301849
Felix Fietkau827e69b2009-11-15 23:09:25 +01001850 if (aphy)
1851 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301852
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301853 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301854 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301855
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301856 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301857 /* Frame was ACKed */
1858 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1859 }
1860
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001861 padpos = ath9k_cmn_padpos(hdr->frame_control);
1862 padsize = padpos & 3;
1863	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301864 /*
1865 * Remove MAC header padding before giving the frame back to
1866 * mac80211.
1867 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001868 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301869 skb_pull(skb, padsize);
1870 }
1871
Sujith1b04b932010-01-08 10:36:05 +05301872 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1873 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001874 ath_print(common, ATH_DBG_PS,
1875 "Going back to sleep after having "
Pavel Roskinf643e512010-01-29 17:22:12 -05001876 "received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301877 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1878 PS_WAIT_FOR_CAB |
1879 PS_WAIT_FOR_PSPOLL_DATA |
1880 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001881 }
1882
Felix Fietkau827e69b2009-11-15 23:09:25 +01001883 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
Jouni Malinenf0ed85c2009-03-03 19:23:31 +02001884 ath9k_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001885 else {
1886 q = skb_get_queue_mapping(skb);
1887 if (q >= 4)
1888 q = 0;
1889
1890 if (--sc->tx.pending_frames[q] < 0)
1891 sc->tx.pending_frames[q] = 0;
1892
Felix Fietkau827e69b2009-11-15 23:09:25 +01001893 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001894 }
Sujithe8324352009-01-16 21:38:42 +05301895}
1896
1897static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001898 struct ath_txq *txq, struct list_head *bf_q,
1899 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301900{
1901 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301902 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301903 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301904
Sujithe8324352009-01-16 21:38:42 +05301905 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301906 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301907
1908 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301909 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301910
1911 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301912 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301913 }
1914
Ben Greearc1739eb2010-10-14 12:45:29 -07001915 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001916 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001917
1918 if (bf->bf_state.bfs_paprd) {
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001919 if (time_after(jiffies,
1920 bf->bf_state.bfs_paprd_timestamp +
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001921 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001922 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001923 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001924 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001925 } else {
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001926 ath_debug_stat_tx(sc, txq, bf, ts);
Ben Greearc23cc812010-10-13 12:01:23 -07001927 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001928 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001929	/* At this point, skb (bf->bf_mpdu) is consumed; make sure we don't
1930 * accidentally reference it later.
1931 */
1932 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301933
1934 /*
1935	 * Return the list of ath_buf for this mpdu to the free queue
1936 */
1937 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1938 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1939 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1940}
1941
1942static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001943 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301944{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001945 u16 seq_st = 0;
1946 u32 ba[WME_BA_BMP_SIZE >> 5];
Sujithe8324352009-01-16 21:38:42 +05301947 int ba_index;
1948 int nbad = 0;
1949 int isaggr = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001950
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001951 if (bf->bf_lastbf->bf_tx_aborted)
Sujithe8324352009-01-16 21:38:42 +05301952 return 0;
Sujith528f0c62008-10-29 10:14:26 +05301953
Sujithcd3d39a2008-08-11 14:03:34 +05301954 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001955 if (isaggr) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001956 seq_st = ts->ts_seqnum;
1957 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001958 }
1959
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001960 while (bf) {
Sujithe8324352009-01-16 21:38:42 +05301961 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1962 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1963 nbad++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001964
Sujithe8324352009-01-16 21:38:42 +05301965 bf = bf->bf_next;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001966 }
1967
Sujithe8324352009-01-16 21:38:42 +05301968 return nbad;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001969}
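/*
 * Example of the BlockAck bookkeeping above (illustrative): for an
 * aggregate the hardware reports the starting sequence number
 * (ts_seqnum) and a 64-bit BlockAck bitmap (ba[]). A subframe with
 * seqno 203 and seq_st 200 maps to ba_index 3, so bit 3 of the bitmap
 * decides whether that subframe counts as bad; on a completely failed
 * burst (!txok) every subframe is counted.
 */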
1970
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001971static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301972 int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301973{
Sujitha22be222009-03-30 15:28:36 +05301974 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301975 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301976 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001977 struct ieee80211_hw *hw = bf->aphy->hw;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301978 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301979
Sujith95e4acb2009-03-13 08:56:09 +05301980 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001981 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301982
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001983 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301984 WARN_ON(tx_rateindex >= hw->max_rates);
1985
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001986 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301987 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001988 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001989 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301990
Björn Smedmanebd02282010-10-10 22:44:39 +02001991 BUG_ON(nbad > bf->bf_nframes);
1992
1993 tx_info->status.ampdu_len = bf->bf_nframes;
1994 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
1995 }
1996
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001997 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301998 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Sujith254ad0f2009-02-04 08:10:19 +05301999 if (ieee80211_is_data(hdr->frame_control)) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002000 if (ts->ts_flags &
Felix Fietkau827e69b2009-11-15 23:09:25 +01002001 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2002 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002003 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2004 (ts->ts_status & ATH9K_TXERR_FIFO))
Felix Fietkau827e69b2009-11-15 23:09:25 +01002005 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
Sujithc4288392008-11-18 09:09:30 +05302006 }
2007 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302008
Felix Fietkau545750d2009-11-23 22:21:01 +01002009 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302010 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002011 tx_info->status.rates[i].idx = -1;
2012 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302013
Felix Fietkau78c46532010-06-25 01:26:16 +02002014 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302015}
2016
Sujith059d8062009-01-16 21:38:49 +05302017static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2018{
2019 int qnum;
2020
Felix Fietkau97923b12010-06-12 00:33:55 -04002021 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2022 if (qnum == -1)
2023 return;
2024
Sujith059d8062009-01-16 21:38:49 +05302025 spin_lock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002026 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07002027 if (ath_mac80211_start_queue(sc, qnum))
2028 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05302029 }
2030 spin_unlock_bh(&txq->axq_lock);
2031}
2032
Sujithc4288392008-11-18 09:09:30 +05302033static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002034{
Sujithcbe61d82009-02-09 13:27:12 +05302035 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002036 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002037 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2038 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302039 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002040 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302041 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002042 int status;
2043
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002044 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2045 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2046 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002047
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002048 for (;;) {
2049 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002050 if (list_empty(&txq->axq_q)) {
2051 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002052 spin_unlock_bh(&txq->axq_lock);
2053 break;
2054 }
2055 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2056
2057 /*
2058 * There is a race condition that a BH gets scheduled
2059	 * after sw writes TxE and before the hw re-loads the last
2060 * descriptor to get the newly chained one.
2061 * Software must keep the last DONE descriptor as a
2062 * holding descriptor - software does so by marking
2063 * it with the STALE flag.
2064 */
2065 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302066 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002067 bf_held = bf;
2068 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302069 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002070 break;
2071 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002072 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302073 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002074 }
2075 }
2076
2077 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302078 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002079
Felix Fietkau29bffa92010-03-29 20:14:23 -07002080 memset(&ts, 0, sizeof(ts));
2081 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002082 if (status == -EINPROGRESS) {
2083 spin_unlock_bh(&txq->axq_lock);
2084 break;
2085 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002086
2087 /*
2088 * Remove ath_buf's of the same transmit unit from txq,
2089 * however leave the last descriptor back as the holding
2090 * descriptor for hw.
2091 */
Sujitha119cc42009-03-30 15:28:38 +05302092 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002093 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002094 if (!list_is_singular(&lastbf->list))
2095 list_cut_position(&bf_head,
2096 &txq->axq_q, lastbf->list.prev);
2097
2098 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002099 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002100 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002101 if (bf_held)
2102 list_del(&bf_held->list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002103 spin_unlock_bh(&txq->axq_lock);
2104
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002105 if (bf_held)
2106 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002107
Sujithcd3d39a2008-08-11 14:03:34 +05302108 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002109 /*
2110 * This frame is sent out as a single frame.
2111 * Use hardware retry status for this frame.
2112 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002113 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302114 bf->bf_state.bf_type |= BUF_XRETRY;
Björn Smedmanebd02282010-10-10 22:44:39 +02002115 ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002116 }
Johannes Berge6a98542008-10-21 12:40:02 +02002117
Sujithcd3d39a2008-08-11 14:03:34 +05302118 if (bf_isampdu(bf))
Felix Fietkau29bffa92010-03-29 20:14:23 -07002119 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002120 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002121 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002122
Sujith059d8062009-01-16 21:38:49 +05302123 ath_wake_mac80211_queue(sc, txq);
2124
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002125 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302126 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002127 ath_txq_schedule(sc, txq);
2128 spin_unlock_bh(&txq->axq_lock);
2129 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002130}
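/*
 * Holding-descriptor example (illustrative): with frames A and B queued
 * and A completed, A's buffers are reaped here except for A's last
 * descriptor, which stays on axq_q marked bf_stale so the hardware can
 * still follow its link pointer to B; that stale holding buffer is only
 * unlinked and returned on a later pass, once it is no longer the tail
 * of the queue.
 */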
2131
Sujith305fe472009-07-23 15:32:29 +05302132static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002133{
2134 struct ath_softc *sc = container_of(work, struct ath_softc,
2135 tx_complete_work.work);
2136 struct ath_txq *txq;
2137 int i;
2138 bool needreset = false;
2139
2140 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2141 if (ATH_TXQ_SETUP(sc, i)) {
2142 txq = &sc->tx.txq[i];
2143 spin_lock_bh(&txq->axq_lock);
2144 if (txq->axq_depth) {
2145 if (txq->axq_tx_inprogress) {
2146 needreset = true;
2147 spin_unlock_bh(&txq->axq_lock);
2148 break;
2149 } else {
2150 txq->axq_tx_inprogress = true;
2151 }
2152 }
2153 spin_unlock_bh(&txq->axq_lock);
2154 }
2155
2156 if (needreset) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002157 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2158 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302159 ath9k_ps_wakeup(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002160 ath_reset(sc, false);
Sujith332c5562009-10-09 09:51:28 +05302161 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002162 }
2163
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002164 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002165 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2166}
2167
2168
Sujithe8324352009-01-16 21:38:42 +05302169
2170void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002171{
Sujithe8324352009-01-16 21:38:42 +05302172 int i;
2173 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174
Sujithe8324352009-01-16 21:38:42 +05302175 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002176
2177 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302178 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2179 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002180 }
2181}
2182
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002183void ath_tx_edma_tasklet(struct ath_softc *sc)
2184{
2185 struct ath_tx_status txs;
2186 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2187 struct ath_hw *ah = sc->sc_ah;
2188 struct ath_txq *txq;
2189 struct ath_buf *bf, *lastbf;
2190 struct list_head bf_head;
2191 int status;
2192 int txok;
2193
2194 for (;;) {
2195 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2196 if (status == -EINPROGRESS)
2197 break;
2198 if (status == -EIO) {
2199 ath_print(common, ATH_DBG_XMIT,
2200 "Error processing tx status\n");
2201 break;
2202 }
2203
2204 /* Skip beacon completions */
2205 if (txs.qid == sc->beacon.beaconq)
2206 continue;
2207
2208 txq = &sc->tx.txq[txs.qid];
2209
2210 spin_lock_bh(&txq->axq_lock);
2211 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2212 spin_unlock_bh(&txq->axq_lock);
2213 return;
2214 }
2215
2216 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2217 struct ath_buf, list);
2218 lastbf = bf->bf_lastbf;
2219
2220 INIT_LIST_HEAD(&bf_head);
2221 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2222 &lastbf->list);
2223 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2224 txq->axq_depth--;
2225 txq->axq_tx_inprogress = false;
2226 spin_unlock_bh(&txq->axq_lock);
2227
2228 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2229
2230 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002231 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2232 bf->bf_state.bf_type |= BUF_XRETRY;
Björn Smedmanebd02282010-10-10 22:44:39 +02002233 ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002234 }
2235
2236 if (bf_isampdu(bf))
2237 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2238 else
2239 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2240 &txs, txok, 0);
2241
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002242 ath_wake_mac80211_queue(sc, txq);
2243
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002244 spin_lock_bh(&txq->axq_lock);
2245 if (!list_empty(&txq->txq_fifo_pending)) {
2246 INIT_LIST_HEAD(&bf_head);
2247 bf = list_first_entry(&txq->txq_fifo_pending,
2248 struct ath_buf, list);
2249 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2250 &bf->bf_lastbf->list);
2251 ath_tx_txqaddbuf(sc, txq, &bf_head);
2252 } else if (sc->sc_flags & SC_OP_TXAGGR)
2253 ath_txq_schedule(sc, txq);
2254 spin_unlock_bh(&txq->axq_lock);
2255 }
2256}
2257
Sujithe8324352009-01-16 21:38:42 +05302258/*****************/
2259/* Init, Cleanup */
2260/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002261
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002262static int ath_txstatus_setup(struct ath_softc *sc, int size)
2263{
2264 struct ath_descdma *dd = &sc->txsdma;
2265 u8 txs_len = sc->sc_ah->caps.txs_len;
2266
2267 dd->dd_desc_len = size * txs_len;
2268 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2269 &dd->dd_desc_paddr, GFP_KERNEL);
2270 if (!dd->dd_desc)
2271 return -ENOMEM;
2272
2273 return 0;
2274}
2275
2276static int ath_tx_edma_init(struct ath_softc *sc)
2277{
2278 int err;
2279
2280 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2281 if (!err)
2282 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2283 sc->txsdma.dd_desc_paddr,
2284 ATH_TXSTATUS_RING_SIZE);
2285
2286 return err;
2287}
2288
2289static void ath_tx_edma_cleanup(struct ath_softc *sc)
2290{
2291 struct ath_descdma *dd = &sc->txsdma;
2292
2293 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2294 dd->dd_desc_paddr);
2295}
2296
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002297int ath_tx_init(struct ath_softc *sc, int nbufs)
2298{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002299 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002300 int error = 0;
2301
Sujith797fe5c2009-03-30 15:28:45 +05302302 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002303
Sujith797fe5c2009-03-30 15:28:45 +05302304 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002305 "tx", nbufs, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302306 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002307 ath_print(common, ATH_DBG_FATAL,
2308 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302309 goto err;
2310 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002311
Sujith797fe5c2009-03-30 15:28:45 +05302312 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002313 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302314 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002315 ath_print(common, ATH_DBG_FATAL,
2316 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302317 goto err;
2318 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002319
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002320 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2321
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002322 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2323 error = ath_tx_edma_init(sc);
2324 if (error)
2325 goto err;
2326 }
2327
Sujith797fe5c2009-03-30 15:28:45 +05302328err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002329 if (error != 0)
2330 ath_tx_cleanup(sc);
2331
2332 return error;
2333}
2334
Sujith797fe5c2009-03-30 15:28:45 +05302335void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002336{
Sujithb77f4832008-12-07 21:44:03 +05302337 if (sc->beacon.bdma.dd_desc_len != 0)
2338 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002339
Sujithb77f4832008-12-07 21:44:03 +05302340 if (sc->tx.txdma.dd_desc_len != 0)
2341 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002342
2343 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2344 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002345}
2346
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002347void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2348{
Sujithc5170162008-10-29 10:13:59 +05302349 struct ath_atx_tid *tid;
2350 struct ath_atx_ac *ac;
2351 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002352
Sujith8ee5afb2008-12-07 21:43:36 +05302353 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302354 tidno < WME_NUM_TID;
2355 tidno++, tid++) {
2356 tid->an = an;
2357 tid->tidno = tidno;
2358 tid->seq_start = tid->seq_next = 0;
2359 tid->baw_size = WME_MAX_BA;
2360 tid->baw_head = tid->baw_tail = 0;
2361 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302362 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302363 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302364 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302365 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302366 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302367 tid->state &= ~AGGR_ADDBA_COMPLETE;
2368 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302369 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002370
Sujith8ee5afb2008-12-07 21:43:36 +05302371 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302372 acno < WME_NUM_AC; acno++, ac++) {
2373 ac->sched = false;
Felix Fietkau1d2231e2010-06-12 00:33:51 -04002374 ac->qnum = sc->tx.hwq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302375 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376 }
2377}
2378
Sujithb5aa9bf2008-10-29 10:13:31 +05302379void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002380{
Felix Fietkau2b409942010-07-07 19:42:08 +02002381 struct ath_atx_ac *ac;
2382 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002383 struct ath_txq *txq;
Felix Fietkau2b409942010-07-07 19:42:08 +02002384 int i, tidno;
Sujithe8324352009-01-16 21:38:42 +05302385
Felix Fietkau2b409942010-07-07 19:42:08 +02002386 for (tidno = 0, tid = &an->tid[tidno];
2387 tidno < WME_NUM_TID; tidno++, tid++) {
2388 i = tid->ac->qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002389
Felix Fietkau2b409942010-07-07 19:42:08 +02002390 if (!ATH_TXQ_SETUP(sc, i))
2391 continue;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002392
Felix Fietkau2b409942010-07-07 19:42:08 +02002393 txq = &sc->tx.txq[i];
2394 ac = tid->ac;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002395
Felix Fietkau2b409942010-07-07 19:42:08 +02002396 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002397
Felix Fietkau2b409942010-07-07 19:42:08 +02002398 if (tid->sched) {
2399 list_del(&tid->list);
2400 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002401 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002402
2403 if (ac->sched) {
2404 list_del(&ac->list);
2405 tid->ac->sched = false;
2406 }
2407
2408 ath_tid_drain(sc, txq, tid);
2409 tid->state &= ~AGGR_ADDBA_COMPLETE;
2410 tid->state &= ~AGGR_CLEANUP;
2411
2412 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002413 }
2414}