/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

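/*
 * Add a TID to its access category's pending list and, if the AC is not
 * already scheduled, add the AC to the hardware queue's schedule list so
 * that ath_txq_schedule() will service it later. Paused TIDs are skipped.
 */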
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf)) {
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
		}
	}

	spin_unlock_bh(&txq->axq_lock);
}

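/*
 * Mark a subframe (identified by its sequence number) as completed in the
 * software block-ack window bitmap and slide the window start forward past
 * any leading subframes that have already completed.
 */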
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

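/*
 * Record a newly transmitted (non-retried) subframe in the block-ack window
 * bitmap, extending the window tail if the frame lies beyond it.
 */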
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

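/*
 * Allocate a fresh ath_buf and copy the descriptor and buffer state of an
 * existing one into it; used when the original descriptor must be left in
 * place as a holding descriptor while the frame is software-retried.
 */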
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

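/*
 * Process the tx status of a completed aggregate: subframes covered by the
 * block-ack bitmap are completed back to mac80211, un-acked subframes are
 * either software-retried (re-queued on the TID) or failed once the retry
 * limit is reached, and the block-ack window is updated accordingly.
 */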
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	int nframes;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));
	nframes = bf->bf_nframes;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				bf->bf_nframes = nframes;
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

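/*
 * Compute the aggregate size limit for this TID: the smallest frame length
 * across the configured rate series that still fits in a 4 ms transmit
 * duration, capped by the hardware limit and the peer's ADDBA maximum.
 * Returns 0 if aggregation should be avoided (legacy rates or a probe rate).
 */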
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

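/*
 * Build one A-MPDU from the head of the TID's software queue: pull frames
 * while they fit within the block-ack window, the aggregate length limit
 * and the subframe count limit, inserting the delimiters required by the
 * peer's MPDU density, and chain their descriptors together.
 */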
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

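/*
 * Keep forming and queueing aggregates from this TID until the hardware
 * queue is sufficiently deep or the block-ack window closes. A single
 * remaining frame is sent as a plain (non-aggregate) frame.
 */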
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

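/*
 * Begin an ADDBA session for the given TID: pause traffic and report the
 * starting sequence number to mac80211. Fails if a session is already
 * established or still being torn down.
 */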
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend)
		ath_print(common, ATH_DBG_FATAL, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

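/*
 * Round-robin scheduler for a hardware queue: pick the next access category
 * and TID with pending traffic, let it form aggregates, and re-queue it at
 * the tail if frames remain.
 */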
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

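/*
 * Queue an MPDU that belongs to an aggregation session: if the TID already
 * has frames pending, is paused, the sequence number falls outside the
 * block-ack window, or the hardware queue is deep enough, the frame is held
 * in the software queue for later aggregation; otherwise it is sent
 * immediately as a single subframe and added to the block-ack window.
 */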
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (use_ldpc)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001436/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001437 * rix - rate index
1438 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1439 * width - 0 for 20 MHz, 1 for 40 MHz
 1440	 * half_gi - use the 3.6 us (short GI) symbol time instead of 4 us
1441 */
Sujith102e0572008-10-29 10:15:16 +05301442static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1443 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001444{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001445 u32 nbits, nsymbits, duration, nsymbols;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001446 int streams, pktlen;
1447
Sujithcd3d39a2008-08-11 14:03:34 +05301448 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +05301449
1450 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001451 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001452 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001453 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001454 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1455
1456 if (!half_gi)
1457 duration = SYMBOL_TIME(nsymbols);
1458 else
1459 duration = SYMBOL_TIME_HALFGI(nsymbols);
1460
Sujithe63835b2008-11-18 09:07:53 +05301461	/* add up the duration of the legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001462 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301463
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001464 return duration;
1465}
1466
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001467static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1468{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001469 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001470 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301471 struct sk_buff *skb;
1472 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301473 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001474 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301475 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301476 int i, flags = 0;
1477 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301478 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301479
1480 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301481
Sujitha22be222009-03-30 15:28:36 +05301482 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301483 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301484 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301485 hdr = (struct ieee80211_hdr *)skb->data;
1486 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301487
Sujithc89424d2009-01-30 14:29:28 +05301488 /*
 1489	 * Whether short preamble is needed for the CTS rate is
 1490	 * determined from the BSS's global flag.
1491 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1492 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001493 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1494 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301495 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001496 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001497
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001498 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001499 bool is_40, is_sgi, is_sp;
1500 int phy;
1501
Sujithe63835b2008-11-18 09:07:53 +05301502 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001503 continue;
1504
Sujitha8efee42008-11-18 09:07:30 +05301505 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301506 series[i].Tries = rates[i].count;
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001507 series[i].ChSel = common->tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001508
Felix Fietkau27032052010-01-17 21:08:50 +01001509 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1510 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301511 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001512 flags |= ATH9K_TXDESC_RTSENA;
1513 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1514 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1515 flags |= ATH9K_TXDESC_CTSENA;
1516 }
1517
Sujithc89424d2009-01-30 14:29:28 +05301518 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1519 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1520 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1521 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001522
Felix Fietkau545750d2009-11-23 22:21:01 +01001523 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1524 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1525 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1526
1527 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1528 /* MCS rates */
1529 series[i].Rate = rix | 0x80;
1530 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1531 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001532 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1533 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001534 continue;
1535 }
1536
 1537		/* legacy rates */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001538		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
 1539		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
 1540		    !(rate->flags & IEEE80211_RATE_ERP_G))
 1541			phy = WLAN_RC_PHY_CCK;
 1542		else
 1543			phy = WLAN_RC_PHY_OFDM;
 1544	
1545 series[i].Rate = rate->hw_value;
1546 if (rate->hw_value_short) {
1547 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1548 series[i].Rate |= rate->hw_value_short;
1549 } else {
1550 is_sp = false;
1551 }
1552
1553 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1554 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001555 }
1556
Felix Fietkau27032052010-01-17 21:08:50 +01001557 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1558 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1559 flags &= ~ATH9K_TXDESC_RTSENA;
1560
1561 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1562 if (flags & ATH9K_TXDESC_RTSENA)
1563 flags &= ~ATH9K_TXDESC_CTSENA;
1564
Sujithe63835b2008-11-18 09:07:53 +05301565 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301566 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1567 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301568 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301569 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301570
Sujith17d79042009-02-09 13:27:03 +05301571 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301572 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001573}
1574
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001575static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
Sujithe8324352009-01-16 21:38:42 +05301576 struct sk_buff *skb,
1577 struct ath_tx_control *txctl)
1578{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001579 struct ath_wiphy *aphy = hw->priv;
1580 struct ath_softc *sc = aphy->sc;
Sujithe8324352009-01-16 21:38:42 +05301581 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1582 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301583 int hdrlen;
1584 __le16 fc;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001585 int padpos, padsize;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001586 bool use_ldpc = false;
Sujithe8324352009-01-16 21:38:42 +05301587
Felix Fietkau827e69b2009-11-15 23:09:25 +01001588 tx_info->pad[0] = 0;
1589 switch (txctl->frame_type) {
Pavel Roskinc81494d2010-03-31 18:05:25 -04001590 case ATH9K_IFT_NOT_INTERNAL:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001591 break;
Pavel Roskinc81494d2010-03-31 18:05:25 -04001592 case ATH9K_IFT_PAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001593 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1594 /* fall through */
Pavel Roskinc81494d2010-03-31 18:05:25 -04001595 case ATH9K_IFT_UNPAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001596 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1597 break;
1598 }
Sujithe8324352009-01-16 21:38:42 +05301599 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1600 fc = hdr->frame_control;
1601
1602 ATH_TXBUF_RESET(bf);
1603
Felix Fietkau827e69b2009-11-15 23:09:25 +01001604 bf->aphy = aphy;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001605 bf->bf_frmlen = skb->len + FCS_LEN;
1606 /* Remove the padding size from bf_frmlen, if any */
1607 padpos = ath9k_cmn_padpos(hdr->frame_control);
1608 padsize = padpos & 3;
 1609	if (padsize && skb->len > padpos + padsize) {
1610 bf->bf_frmlen -= padsize;
1611 }
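	/*
	 * Illustrative note (hypothetical numbers): a 3-address QoS data
	 * header is 26 bytes, so ath9k_cmn_padpos() would return 26 and
	 * padsize = 26 & 3 = 2; the two alignment bytes inserted after the
	 * header are not part of the air frame, hence the subtraction above.
	 */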
Sujithe8324352009-01-16 21:38:42 +05301612
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001613 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
Sujithc656bbb2009-01-16 21:38:56 +05301614 bf->bf_state.bf_type |= BUF_HT;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001615 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1616 use_ldpc = true;
1617 }
Sujithe8324352009-01-16 21:38:42 +05301618
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001619 bf->bf_state.bfs_paprd = txctl->paprd;
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001620 if (txctl->paprd)
1621 bf->bf_state.bfs_paprd_timestamp = jiffies;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001622 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
Sujithe8324352009-01-16 21:38:42 +05301623
Luis R. Rodriguezc17512d2010-08-05 17:56:54 -04001624 bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujithe8324352009-01-16 21:38:42 +05301625 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1626 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1627 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1628 } else {
1629 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1630 }
1631
Sujith17b182e2009-12-14 14:56:56 +05301632 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1633 (sc->sc_flags & SC_OP_TXAGGR))
Sujithe8324352009-01-16 21:38:42 +05301634 assign_aggr_tid_seqno(skb, bf);
1635
1636 bf->bf_mpdu = skb;
1637
Ben Greearc1739eb2010-10-14 12:45:29 -07001638 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1639 skb->len, DMA_TO_DEVICE);
1640 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301641 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001642 bf->bf_buf_addr = 0;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001643 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1644 "dma_mapping_error() on TX\n");
Sujithe8324352009-01-16 21:38:42 +05301645 return -ENOMEM;
1646 }
1647
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001648 bf->bf_tx_aborted = false;
1649
Sujithe8324352009-01-16 21:38:42 +05301650 return 0;
1651}
1652
1653/* FIXME: tx power */
1654static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1655 struct ath_tx_control *txctl)
1656{
Sujitha22be222009-03-30 15:28:36 +05301657 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301658 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithc37452b2009-03-09 09:31:57 +05301659 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301660 struct ath_node *an = NULL;
1661 struct list_head bf_head;
1662 struct ath_desc *ds;
1663 struct ath_atx_tid *tid;
Sujithcbe61d82009-02-09 13:27:12 +05301664 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301665 int frm_type;
Sujithc37452b2009-03-09 09:31:57 +05301666 __le16 fc;
Sujithe8324352009-01-16 21:38:42 +05301667
1668 frm_type = get_hw_packet_type(skb);
Sujithc37452b2009-03-09 09:31:57 +05301669 fc = hdr->frame_control;
Sujithe8324352009-01-16 21:38:42 +05301670
1671 INIT_LIST_HEAD(&bf_head);
1672 list_add_tail(&bf->list, &bf_head);
1673
1674 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001675 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301676
1677 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1678 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1679
1680 ath9k_hw_filltxdesc(ah, ds,
1681 skb->len, /* segment length */
1682 true, /* first segment */
1683 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001684 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001685 bf->bf_buf_addr,
1686 txctl->txq->axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301687
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001688 if (bf->bf_state.bfs_paprd)
1689 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1690
Sujithe8324352009-01-16 21:38:42 +05301691 spin_lock_bh(&txctl->txq->axq_lock);
1692
1693 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1694 tx_info->control.sta) {
1695 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1696 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1697
Sujithc37452b2009-03-09 09:31:57 +05301698 if (!ieee80211_is_data_qos(fc)) {
1699 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1700 goto tx_done;
1701 }
1702
Felix Fietkau4fdec032010-03-12 04:02:43 +01001703 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Sujithe8324352009-01-16 21:38:42 +05301704 /*
1705 * Try aggregation if it's a unicast data frame
1706 * and the destination is HT capable.
1707 */
1708 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1709 } else {
1710 /*
1711 * Send this frame as regular when ADDBA
1712 * exchange is neither complete nor pending.
1713 */
Sujithc37452b2009-03-09 09:31:57 +05301714 ath_tx_send_ht_normal(sc, txctl->txq,
1715 tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301716 }
1717 } else {
Sujithc37452b2009-03-09 09:31:57 +05301718 ath_tx_send_normal(sc, txctl->txq, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301719 }
1720
Sujithc37452b2009-03-09 09:31:57 +05301721tx_done:
Sujithe8324352009-01-16 21:38:42 +05301722 spin_unlock_bh(&txctl->txq->axq_lock);
1723}
1724
1725/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001726int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301727 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001728{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001729 struct ath_wiphy *aphy = hw->priv;
1730 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001731 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau84642d62010-06-01 21:33:13 +02001732 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001733 struct ath_buf *bf;
Felix Fietkau97923b12010-06-12 00:33:55 -04001734 int q, r;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001735
Sujithe8324352009-01-16 21:38:42 +05301736 bf = ath_tx_get_buffer(sc);
1737 if (!bf) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001738 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
Sujithe8324352009-01-16 21:38:42 +05301739 return -1;
1740 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001741
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001742 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301743 if (unlikely(r)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001744 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001745
Sujithe8324352009-01-16 21:38:42 +05301746		/* upon ath_tx_processq() this TX queue will be resumed; we
 1747		 * guarantee this will happen by knowing beforehand that
 1748		 * we will at least have to run TX completion on one buffer
 1749		 * on the queue */
1750 spin_lock_bh(&txq->axq_lock);
Felix Fietkau84642d62010-06-01 21:33:13 +02001751 if (!txq->stopped && txq->axq_depth > 1) {
Luis R. Rodriguezf52de032009-11-02 17:09:12 -08001752 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
Sujithe8324352009-01-16 21:38:42 +05301753 txq->stopped = 1;
1754 }
1755 spin_unlock_bh(&txq->axq_lock);
1756
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001757 ath_tx_return_buffer(sc, bf);
Sujithe8324352009-01-16 21:38:42 +05301758
1759 return r;
1760 }
1761
Felix Fietkau97923b12010-06-12 00:33:55 -04001762 q = skb_get_queue_mapping(skb);
1763 if (q >= 4)
1764 q = 0;
1765
1766 spin_lock_bh(&txq->axq_lock);
1767 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1768 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1769 txq->stopped = 1;
1770 }
1771 spin_unlock_bh(&txq->axq_lock);
1772
Sujithe8324352009-01-16 21:38:42 +05301773 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001774
1775 return 0;
1776}
1777
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001778void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001779{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001780 struct ath_wiphy *aphy = hw->priv;
1781 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001782 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001783 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1784 int padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301785 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1786 struct ath_tx_control txctl;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001787
Sujithe8324352009-01-16 21:38:42 +05301788 memset(&txctl, 0, sizeof(struct ath_tx_control));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001789
Sujithe8324352009-01-16 21:38:42 +05301790 /*
1791 * As a temporary workaround, assign seq# here; this will likely need
1792 * to be cleaned up to work better with Beacon transmission and virtual
1793 * BSSes.
1794 */
1795 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
Sujithe8324352009-01-16 21:38:42 +05301796 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1797 sc->tx.seq_no += 0x10;
1798 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1799 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001800 }
1801
Sujithe8324352009-01-16 21:38:42 +05301802 /* Add the padding after the header if this is not already done */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001803 padpos = ath9k_cmn_padpos(hdr->frame_control);
1804 padsize = padpos & 3;
 1805	if (padsize && skb->len > padpos) {
Sujithe8324352009-01-16 21:38:42 +05301806 if (skb_headroom(skb) < padsize) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001807 ath_print(common, ATH_DBG_XMIT,
1808 "TX CABQ padding failed\n");
Sujithe8324352009-01-16 21:38:42 +05301809 dev_kfree_skb_any(skb);
1810 return;
1811 }
1812 skb_push(skb, padsize);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001813 memmove(skb->data, skb->data + padsize, padpos);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001814 }
1815
Sujithe8324352009-01-16 21:38:42 +05301816 txctl.txq = sc->beacon.cabq;
1817
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001818 ath_print(common, ATH_DBG_XMIT,
1819 "transmitting CABQ packet, skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301820
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001821 if (ath_tx_start(hw, skb, &txctl) != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001822 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
Sujithe8324352009-01-16 21:38:42 +05301823 goto exit;
1824 }
1825
1826 return;
1827exit:
1828 dev_kfree_skb_any(skb);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001829}
1830
Sujithe8324352009-01-16 21:38:42 +05301831/*****************/
1832/* TX Completion */
1833/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001834
Sujithe8324352009-01-16 21:38:42 +05301835static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau827e69b2009-11-15 23:09:25 +01001836 struct ath_wiphy *aphy, int tx_flags)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001837{
Sujithe8324352009-01-16 21:38:42 +05301838 struct ieee80211_hw *hw = sc->hw;
1839 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001840 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001841	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001842 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301843
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001844 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301845
Felix Fietkau827e69b2009-11-15 23:09:25 +01001846 if (aphy)
1847 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301848
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301849 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301850 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301851
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301852 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301853 /* Frame was ACKed */
1854 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1855 }
1856
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001857 padpos = ath9k_cmn_padpos(hdr->frame_control);
1858 padsize = padpos & 3;
 1859	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301860 /*
1861 * Remove MAC header padding before giving the frame back to
1862 * mac80211.
1863 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001864 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301865 skb_pull(skb, padsize);
1866 }
1867
Sujith1b04b932010-01-08 10:36:05 +05301868 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1869 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001870 ath_print(common, ATH_DBG_PS,
1871 "Going back to sleep after having "
Pavel Roskinf643e512010-01-29 17:22:12 -05001872 "received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301873 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1874 PS_WAIT_FOR_CAB |
1875 PS_WAIT_FOR_PSPOLL_DATA |
1876 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001877 }
1878
Felix Fietkau827e69b2009-11-15 23:09:25 +01001879 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
Jouni Malinenf0ed85c2009-03-03 19:23:31 +02001880 ath9k_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001881 else {
1882 q = skb_get_queue_mapping(skb);
1883 if (q >= 4)
1884 q = 0;
1885
1886 if (--sc->tx.pending_frames[q] < 0)
1887 sc->tx.pending_frames[q] = 0;
1888
Felix Fietkau827e69b2009-11-15 23:09:25 +01001889 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001890 }
Sujithe8324352009-01-16 21:38:42 +05301891}
1892
1893static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001894 struct ath_txq *txq, struct list_head *bf_q,
1895 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301896{
1897 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301898 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301899 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301900
Sujithe8324352009-01-16 21:38:42 +05301901 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301902 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301903
1904 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301905 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301906
1907 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301908 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301909 }
1910
Ben Greearc1739eb2010-10-14 12:45:29 -07001911 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001912 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001913
1914 if (bf->bf_state.bfs_paprd) {
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001915 if (time_after(jiffies,
1916 bf->bf_state.bfs_paprd_timestamp +
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001917 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001918 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001919 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001920 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001921 } else {
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001922 ath_debug_stat_tx(sc, txq, bf, ts);
Ben Greearc23cc812010-10-13 12:01:23 -07001923 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001924 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001925 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
1926 * accidentally reference it later.
1927 */
1928 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301929
1930 /*
 1931	 * Return the list of ath_buf's for this mpdu to the free queue
1932 */
1933 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1934 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1935 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1936}
1937
1938static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001939 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301940{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001941 u16 seq_st = 0;
1942 u32 ba[WME_BA_BMP_SIZE >> 5];
Sujithe8324352009-01-16 21:38:42 +05301943 int ba_index;
1944 int nbad = 0;
1945 int isaggr = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001946
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001947 if (bf->bf_lastbf->bf_tx_aborted)
Sujithe8324352009-01-16 21:38:42 +05301948 return 0;
Sujith528f0c62008-10-29 10:14:26 +05301949
Sujithcd3d39a2008-08-11 14:03:34 +05301950 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001951 if (isaggr) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001952 seq_st = ts->ts_seqnum;
1953 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001954 }
1955
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001956 while (bf) {
Sujithe8324352009-01-16 21:38:42 +05301957 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1958 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1959 nbad++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001960
Sujithe8324352009-01-16 21:38:42 +05301961 bf = bf->bf_next;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001962 }
1963
Sujithe8324352009-01-16 21:38:42 +05301964 return nbad;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001965}
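
/*
 * Illustrative note on the walk above (a sketch; it assumes the ath9k.h
 * definitions ATH_BA_INDEX(st, seq) == ((seq - st) & (IEEE80211_SEQ_MAX - 1))
 * and ATH_BA_ISSET(bm, n) testing bit n of the 64-entry block-ack bitmap):
 * if the hardware reports ts_seqnum = 200 and a subframe carried seqno 205,
 * bit 5 of ba[] decides whether that subframe is counted as bad (i.e. not
 * block-acked).
 */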
1966
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001967static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301968 int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301969{
Sujitha22be222009-03-30 15:28:36 +05301970 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301971 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301972 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001973 struct ieee80211_hw *hw = bf->aphy->hw;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301974 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301975
Sujith95e4acb2009-03-13 08:56:09 +05301976 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001977 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301978
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001979 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301980 WARN_ON(tx_rateindex >= hw->max_rates);
1981
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001982 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301983 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001984 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001985 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301986
Björn Smedmanebd02282010-10-10 22:44:39 +02001987 BUG_ON(nbad > bf->bf_nframes);
1988
1989 tx_info->status.ampdu_len = bf->bf_nframes;
1990 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
1991 }
1992
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001993 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301994 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Sujith254ad0f2009-02-04 08:10:19 +05301995 if (ieee80211_is_data(hdr->frame_control)) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001996 if (ts->ts_flags &
Felix Fietkau827e69b2009-11-15 23:09:25 +01001997 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
1998 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001999 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2000 (ts->ts_status & ATH9K_TXERR_FIFO))
Felix Fietkau827e69b2009-11-15 23:09:25 +01002001 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
Sujithc4288392008-11-18 09:09:30 +05302002 }
2003 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302004
Felix Fietkau545750d2009-11-23 22:21:01 +01002005 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302006 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002007 tx_info->status.rates[i].idx = -1;
2008 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302009
Felix Fietkau78c46532010-06-25 01:26:16 +02002010 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302011}
2012
Sujith059d8062009-01-16 21:38:49 +05302013static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2014{
2015 int qnum;
2016
Felix Fietkau97923b12010-06-12 00:33:55 -04002017 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2018 if (qnum == -1)
2019 return;
2020
Sujith059d8062009-01-16 21:38:49 +05302021 spin_lock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002022 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07002023 if (ath_mac80211_start_queue(sc, qnum))
2024 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05302025 }
2026 spin_unlock_bh(&txq->axq_lock);
2027}
2028
Sujithc4288392008-11-18 09:09:30 +05302029static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002030{
Sujithcbe61d82009-02-09 13:27:12 +05302031 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002032 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002033 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2034 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302035 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002036 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302037 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002038 int status;
2039
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002040 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2041 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2042 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002043
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002044 for (;;) {
2045 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002046 if (list_empty(&txq->axq_q)) {
2047 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002048 spin_unlock_bh(&txq->axq_lock);
2049 break;
2050 }
2051 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2052
 2053		 * There is a race condition in which a BH gets scheduled
 2054		 * after software writes TxE but before the hardware reloads
 2055		 * the last descriptor to pick up the newly chained one.
2056 * descriptor to get the newly chained one.
2057 * Software must keep the last DONE descriptor as a
2058 * holding descriptor - software does so by marking
2059 * it with the STALE flag.
2060 */
2061 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302062 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002063 bf_held = bf;
2064 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302065 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002066 break;
2067 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002068 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302069 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002070 }
2071 }
2072
2073 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302074 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002075
Felix Fietkau29bffa92010-03-29 20:14:23 -07002076 memset(&ts, 0, sizeof(ts));
2077 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002078 if (status == -EINPROGRESS) {
2079 spin_unlock_bh(&txq->axq_lock);
2080 break;
2081 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002082
2083 /*
 2084		 * Remove the ath_bufs of this transmit unit from txq, but
 2085		 * leave the last descriptor behind as the holding
 2086		 * descriptor for the hardware.
2087 */
Sujitha119cc42009-03-30 15:28:38 +05302088 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002089 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002090 if (!list_is_singular(&lastbf->list))
2091 list_cut_position(&bf_head,
2092 &txq->axq_q, lastbf->list.prev);
2093
2094 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002095 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002096 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002097 if (bf_held)
2098 list_del(&bf_held->list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002099 spin_unlock_bh(&txq->axq_lock);
2100
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002101 if (bf_held)
2102 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002103
Sujithcd3d39a2008-08-11 14:03:34 +05302104 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002105 /*
2106 * This frame is sent out as a single frame.
2107 * Use hardware retry status for this frame.
2108 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002109 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302110 bf->bf_state.bf_type |= BUF_XRETRY;
Björn Smedmanebd02282010-10-10 22:44:39 +02002111 ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002112 }
Johannes Berge6a98542008-10-21 12:40:02 +02002113
Sujithcd3d39a2008-08-11 14:03:34 +05302114 if (bf_isampdu(bf))
Felix Fietkau29bffa92010-03-29 20:14:23 -07002115 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002116 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002117 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002118
Sujith059d8062009-01-16 21:38:49 +05302119 ath_wake_mac80211_queue(sc, txq);
2120
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002121 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302122 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002123 ath_txq_schedule(sc, txq);
2124 spin_unlock_bh(&txq->axq_lock);
2125 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002126}
2127
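/*
 * Watchdog for stuck TX queues (descriptive note sketching the intent of the
 * code below): every pass marks busy queues with axq_tx_inprogress; if the
 * flag is still set and the queue is still non-empty on the next pass, no
 * descriptor completed in the interval, so the hardware is assumed to be
 * hung and the chip is reset.
 */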
Sujith305fe472009-07-23 15:32:29 +05302128static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002129{
2130 struct ath_softc *sc = container_of(work, struct ath_softc,
2131 tx_complete_work.work);
2132 struct ath_txq *txq;
2133 int i;
2134 bool needreset = false;
2135
2136 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2137 if (ATH_TXQ_SETUP(sc, i)) {
2138 txq = &sc->tx.txq[i];
2139 spin_lock_bh(&txq->axq_lock);
2140 if (txq->axq_depth) {
2141 if (txq->axq_tx_inprogress) {
2142 needreset = true;
2143 spin_unlock_bh(&txq->axq_lock);
2144 break;
2145 } else {
2146 txq->axq_tx_inprogress = true;
2147 }
2148 }
2149 spin_unlock_bh(&txq->axq_lock);
2150 }
2151
2152 if (needreset) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002153 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2154 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302155 ath9k_ps_wakeup(sc);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002156 ath_reset(sc, true);
Sujith332c5562009-10-09 09:51:28 +05302157 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002158 }
2159
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002160 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002161 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2162}
2163
2164
Sujithe8324352009-01-16 21:38:42 +05302165
2166void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002167{
Sujithe8324352009-01-16 21:38:42 +05302168 int i;
2169 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002170
Sujithe8324352009-01-16 21:38:42 +05302171 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002172
2173 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302174 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2175 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002176 }
2177}
2178
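/*
 * TX completion path for EDMA (AR9003 family) chips (descriptive note):
 * unlike ath_tx_processq(), completion status is popped from a dedicated
 * TX status ring via ath9k_hw_txprocdesc(ah, NULL, ...), and txs.qid
 * identifies which hardware queue the completed frame belongs to.
 */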
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002179void ath_tx_edma_tasklet(struct ath_softc *sc)
2180{
2181 struct ath_tx_status txs;
2182 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2183 struct ath_hw *ah = sc->sc_ah;
2184 struct ath_txq *txq;
2185 struct ath_buf *bf, *lastbf;
2186 struct list_head bf_head;
2187 int status;
2188 int txok;
2189
2190 for (;;) {
2191 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2192 if (status == -EINPROGRESS)
2193 break;
2194 if (status == -EIO) {
2195 ath_print(common, ATH_DBG_XMIT,
2196 "Error processing tx status\n");
2197 break;
2198 }
2199
2200 /* Skip beacon completions */
2201 if (txs.qid == sc->beacon.beaconq)
2202 continue;
2203
2204 txq = &sc->tx.txq[txs.qid];
2205
2206 spin_lock_bh(&txq->axq_lock);
2207 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2208 spin_unlock_bh(&txq->axq_lock);
2209 return;
2210 }
2211
2212 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2213 struct ath_buf, list);
2214 lastbf = bf->bf_lastbf;
2215
2216 INIT_LIST_HEAD(&bf_head);
2217 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2218 &lastbf->list);
2219 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2220 txq->axq_depth--;
2221 txq->axq_tx_inprogress = false;
2222 spin_unlock_bh(&txq->axq_lock);
2223
2224 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2225
2226 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002227 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2228 bf->bf_state.bf_type |= BUF_XRETRY;
Björn Smedmanebd02282010-10-10 22:44:39 +02002229 ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002230 }
2231
2232 if (bf_isampdu(bf))
2233 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2234 else
2235 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2236 &txs, txok, 0);
2237
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002238 ath_wake_mac80211_queue(sc, txq);
2239
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002240 spin_lock_bh(&txq->axq_lock);
2241 if (!list_empty(&txq->txq_fifo_pending)) {
2242 INIT_LIST_HEAD(&bf_head);
2243 bf = list_first_entry(&txq->txq_fifo_pending,
2244 struct ath_buf, list);
2245 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2246 &bf->bf_lastbf->list);
2247 ath_tx_txqaddbuf(sc, txq, &bf_head);
2248 } else if (sc->sc_flags & SC_OP_TXAGGR)
2249 ath_txq_schedule(sc, txq);
2250 spin_unlock_bh(&txq->axq_lock);
2251 }
2252}
2253
Sujithe8324352009-01-16 21:38:42 +05302254/*****************/
2255/* Init, Cleanup */
2256/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002257
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002258static int ath_txstatus_setup(struct ath_softc *sc, int size)
2259{
2260 struct ath_descdma *dd = &sc->txsdma;
2261 u8 txs_len = sc->sc_ah->caps.txs_len;
2262
2263 dd->dd_desc_len = size * txs_len;
2264 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2265 &dd->dd_desc_paddr, GFP_KERNEL);
2266 if (!dd->dd_desc)
2267 return -ENOMEM;
2268
2269 return 0;
2270}
2271
2272static int ath_tx_edma_init(struct ath_softc *sc)
2273{
2274 int err;
2275
2276 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2277 if (!err)
2278 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2279 sc->txsdma.dd_desc_paddr,
2280 ATH_TXSTATUS_RING_SIZE);
2281
2282 return err;
2283}
2284
2285static void ath_tx_edma_cleanup(struct ath_softc *sc)
2286{
2287 struct ath_descdma *dd = &sc->txsdma;
2288
2289 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2290 dd->dd_desc_paddr);
2291}
2292
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002293int ath_tx_init(struct ath_softc *sc, int nbufs)
2294{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002295 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002296 int error = 0;
2297
Sujith797fe5c2009-03-30 15:28:45 +05302298 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002299
Sujith797fe5c2009-03-30 15:28:45 +05302300 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002301 "tx", nbufs, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302302 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002303 ath_print(common, ATH_DBG_FATAL,
2304 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302305 goto err;
2306 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002307
Sujith797fe5c2009-03-30 15:28:45 +05302308 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002309 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302310 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002311 ath_print(common, ATH_DBG_FATAL,
2312 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302313 goto err;
2314 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002315
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002316 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2317
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002318 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2319 error = ath_tx_edma_init(sc);
2320 if (error)
2321 goto err;
2322 }
2323
Sujith797fe5c2009-03-30 15:28:45 +05302324err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002325 if (error != 0)
2326 ath_tx_cleanup(sc);
2327
2328 return error;
2329}
2330
Sujith797fe5c2009-03-30 15:28:45 +05302331void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002332{
Sujithb77f4832008-12-07 21:44:03 +05302333 if (sc->beacon.bdma.dd_desc_len != 0)
2334 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002335
Sujithb77f4832008-12-07 21:44:03 +05302336 if (sc->tx.txdma.dd_desc_len != 0)
2337 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002338
2339 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2340 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002341}
2342
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002343void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2344{
Sujithc5170162008-10-29 10:13:59 +05302345 struct ath_atx_tid *tid;
2346 struct ath_atx_ac *ac;
2347 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002348
Sujith8ee5afb2008-12-07 21:43:36 +05302349 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302350 tidno < WME_NUM_TID;
2351 tidno++, tid++) {
2352 tid->an = an;
2353 tid->tidno = tidno;
2354 tid->seq_start = tid->seq_next = 0;
2355 tid->baw_size = WME_MAX_BA;
2356 tid->baw_head = tid->baw_tail = 0;
2357 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302358 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302359 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302360 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302361 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302362 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302363 tid->state &= ~AGGR_ADDBA_COMPLETE;
2364 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302365 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002366
Sujith8ee5afb2008-12-07 21:43:36 +05302367 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302368 acno < WME_NUM_AC; acno++, ac++) {
2369 ac->sched = false;
Felix Fietkau1d2231e2010-06-12 00:33:51 -04002370 ac->qnum = sc->tx.hwq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302371 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002372 }
2373}
2374
Sujithb5aa9bf2008-10-29 10:13:31 +05302375void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376{
Felix Fietkau2b409942010-07-07 19:42:08 +02002377 struct ath_atx_ac *ac;
2378 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002379 struct ath_txq *txq;
Felix Fietkau2b409942010-07-07 19:42:08 +02002380 int i, tidno;
Sujithe8324352009-01-16 21:38:42 +05302381
Felix Fietkau2b409942010-07-07 19:42:08 +02002382 for (tidno = 0, tid = &an->tid[tidno];
2383 tidno < WME_NUM_TID; tidno++, tid++) {
2384 i = tid->ac->qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002385
Felix Fietkau2b409942010-07-07 19:42:08 +02002386 if (!ATH_TXQ_SETUP(sc, i))
2387 continue;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002388
Felix Fietkau2b409942010-07-07 19:42:08 +02002389 txq = &sc->tx.txq[i];
2390 ac = tid->ac;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002391
Felix Fietkau2b409942010-07-07 19:42:08 +02002392 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002393
Felix Fietkau2b409942010-07-07 19:42:08 +02002394 if (tid->sched) {
2395 list_del(&tid->list);
2396 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002397 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002398
2399 if (ac->sched) {
2400 list_del(&ac->list);
2401 tid->ac->sched = false;
2402 }
2403
2404 ath_tid_drain(sc, txq, tid);
2405 tid->state &= ~AGGR_ADDBA_COMPLETE;
2406 tid->state &= ~AGGR_CLEANUP;
2407
2408 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002409 }
2410}