/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
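/*
 * Illustrative check of the symbol-timing helpers above (not used by the
 * code; the numbers assume the standard 802.11n symbol times): with the
 * long guard interval a symbol lasts 4 us, so SYMBOL_TIME(10) = 40 us and
 * NUM_SYMBOLS_PER_USEC(8) = 2 symbols.  With the short GI a symbol lasts
 * 3.6 us, which the integer math approximates: SYMBOL_TIME_HALFGI(10) =
 * (10 * 18 + 4) / 5 = 36 us, and NUM_SYMBOLS_PER_USEC_HALFGI(8) =
 * ((8 * 5) - 4) / 18 = 2 symbols.
 */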

#define OFDM_SIFS_TIME          16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /* 0: BPSK */
	{    52,  108 },     /* 1: QPSK 1/2 */
	{    78,  162 },     /* 2: QPSK 3/4 */
	{   104,  216 },     /* 3: 16-QAM 1/2 */
	{   156,  324 },     /* 4: 16-QAM 3/4 */
	{   208,  432 },     /* 5: 64-QAM 2/3 */
	{   234,  486 },     /* 6: 64-QAM 3/4 */
	{   260,  540 },     /* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)
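/*
 * Worked example of how the helpers above combine (illustration only):
 * a rate code of 0x8f is an HT rate (IS_HT_RATE() sees bit 7 set),
 * HT_RC_2_MCS(0x8f) = 15, and HT_RC_2_STREAMS(0x8f) =
 * ((0x8f & 0x78) >> 3) + 1 = 2 spatial streams.  MCS 15 uses 64-QAM 5/6
 * per stream, so at 40 MHz the per-symbol payload is
 * bits_per_symbol[15 % 8][1] * 2 = 540 * 2 = 1080 bits, i.e. 270 Mbit/s
 * at 4 us per symbol.
 */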

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};
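/*
 * Note on the table above (illustration only): it is indexed as
 * ath_max_4ms_framelen[mode][MCS index], where the mode selects bandwidth
 * and guard interval and the MCS index runs from 0 to 31 (up to four
 * spatial streams).  Each entry is roughly "PHY rate * 4 ms" expressed in
 * bytes, capped below 65536 because the hardware length field is 16 bits:
 * e.g. MCS 7 at 20 MHz with the long GI is 65 Mbit/s, which gives about
 * 32 KB in 4 ms and matches the 32172-byte entry.
 */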

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);
	tid->paused++;
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		BUG_ON(bf_isretried(bf));
		list_move_tail(&bf->list, &bf_head);
		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	BUG_ON(tid->tx_buf[cindex] != NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
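/*
 * Illustration of the block-ack window bookkeeping above (assuming
 * ATH_BA_INDEX() returns the offset of a sequence number from seq_start,
 * modulo IEEE80211_SEQ_MAX): with seq_start = 100 and baw_head = 5, a
 * subframe with sequence number 103 has index 3 and is tracked in
 * tx_buf[(5 + 3) & (ATH_TID_MAX_BUFS - 1)], i.e. slot 8 for any
 * reasonably sized window.  When it completes, ath_tx_update_baw()
 * clears that slot and slides seq_start and baw_head forward past any
 * leading completed slots.
 */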

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * The AR5416 can become deaf/mute when a BA
			 * issue happens and the chip then needs to be
			 * reset.  But the AP code may have
			 * synchronization issues when performing an
			 * internal reset in this routine, so only
			 * enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet altogether.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * The hardware can accept aggregates up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here as
	 * zero.  Ignore 65536 since we are constrained by the hardware.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. the first rate) to
	 * determine the required minimum length for a subframe.  Take into
	 * account whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
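/*
 * Worked example for the density calculation above (illustration only,
 * assuming an MPDU delimiter is ATH_AGGR_DELIM_SZ = 4 bytes): with an
 * mpdudensity of 8 us, MCS 7 at 20 MHz, long GI and one stream,
 * nsymbols = NUM_SYMBOLS_PER_USEC(8) = 2 and nsymbits = 260 * 1 = 260,
 * so minlen = (2 * 260) / 8 = 65 bytes.  A 40-byte subframe then needs
 * mindelim = (65 - 40) / 4 = 6 delimiters, and ndelim is raised to at
 * least that value.
 */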

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}
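/*
 * Illustration of the length bookkeeping above (not used by the code,
 * again assuming ATH_AGGR_DELIM_SZ = 4): a 1537-byte subframe gives
 * al_delta = 4 + 1537 = 1541 bytes.  Subframes must start on 4-byte
 * boundaries, so PADBYTES(1541) = 3 pad bytes, and with e.g. two MPDU
 * delimiters required for density the gap before the next subframe is
 * bpad = 3 + (2 << 2) = 11 bytes.  The running total "al" thus counts
 * payload, delimiters and padding, and is what aggr_limit is compared
 * against.
 */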

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		       u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txtid->state |= AGGR_ADDBA_PROGRESS;
	ath_tx_pause_tid(sc, txtid);
	*ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_tx_status ts;
	struct ath_buf *bf;
	struct list_head bf_head;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: this relies on the assumption that a
			 * software-retried frame will always stay at
			 * the head of the software queue.
			 */
			break;
		}
		list_move_tail(&bf->list, &bf_head);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}
	spin_unlock_bh(&txq->axq_lock);

	if (txtid->baw_head != txtid->baw_tail) {
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		ath_tx_flush_tid(sc, txtid);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load, and it only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is that the tx queue may back
	 * up, in which case the top half of the kernel may also back
	 * up due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}
1403
Sujith528f0c62008-10-29 10:14:26 +05301404static int get_hw_crypto_keytype(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001405{
Sujith528f0c62008-10-29 10:14:26 +05301406 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1407
1408 if (tx_info->control.hw_key) {
1409 if (tx_info->control.hw_key->alg == ALG_WEP)
1410 return ATH9K_KEY_TYPE_WEP;
1411 else if (tx_info->control.hw_key->alg == ALG_TKIP)
1412 return ATH9K_KEY_TYPE_TKIP;
1413 else if (tx_info->control.hw_key->alg == ALG_CCMP)
1414 return ATH9K_KEY_TYPE_AES;
1415 }
1416
1417 return ATH9K_KEY_TYPE_CLEAR;
1418}
1419
Sujith528f0c62008-10-29 10:14:26 +05301420static void assign_aggr_tid_seqno(struct sk_buff *skb,
1421 struct ath_buf *bf)
1422{
1423 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1424 struct ieee80211_hdr *hdr;
1425 struct ath_node *an;
1426 struct ath_atx_tid *tid;
1427 __le16 fc;
1428 u8 *qc;
1429
1430 if (!tx_info->control.sta)
1431 return;
1432
1433 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1434 hdr = (struct ieee80211_hdr *)skb->data;
1435 fc = hdr->frame_control;
1436
Sujith528f0c62008-10-29 10:14:26 +05301437 if (ieee80211_is_data_qos(fc)) {
1438 qc = ieee80211_get_qos_ctl(hdr);
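		/* the TID is carried in the low four bits of the QoS control field */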
1439 bf->bf_tidno = qc[0] & 0xf;
Sujith98deeea2008-08-11 14:05:46 +05301440 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001441
Sujithe8324352009-01-16 21:38:42 +05301442 /*
1443 * For HT capable stations, we save tidno for later use.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301444 * We also override seqno set by upper layer with the one
1445 * in tx aggregation state.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301446 */
1447 tid = ATH_AN_2_TID(an, bf->bf_tidno);
Sujith17b182e2009-12-14 14:56:56 +05301448 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301449 bf->bf_seqno = tid->seq_next;
1450 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
Sujith528f0c62008-10-29 10:14:26 +05301451}
1452
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001453static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
Sujith528f0c62008-10-29 10:14:26 +05301454{
1455 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1456 int flags = 0;
1457
1458 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1459 flags |= ATH9K_TXDESC_INTREQ;
1460
1461 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1462 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301463
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001464 if (use_ldpc)
1465 flags |= ATH9K_TXDESC_LDPC;
1466
Sujith528f0c62008-10-29 10:14:26 +05301467 return flags;
1468}
1469
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001470/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001471 * rix - rate index
1472 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1473 * width - 0 for 20 MHz, 1 for 40 MHz
1474 * half_gi - use the 3.6 us (short GI) symbol time instead of 4 us
1475 */
Sujith102e0572008-10-29 10:15:16 +05301476static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1477 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001478{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001479 u32 nbits, nsymbits, duration, nsymbols;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001480 int streams, pktlen;
1481
Sujithcd3d39a2008-08-11 14:03:34 +05301482 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +05301483
1484 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001485 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001486 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001487 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001488 nsymbols = (nbits + nsymbits - 1) / nsymbits;
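	/* round up: a partially filled OFDM symbol still occupies a full symbol slot */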
1489
1490 if (!half_gi)
1491 duration = SYMBOL_TIME(nsymbols);
1492 else
1493 duration = SYMBOL_TIME_HALFGI(nsymbols);
1494
Sujithe63835b2008-11-18 09:07:53 +05301495 /* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001496 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301497
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001498 return duration;
1499}
1500
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001501static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1502{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001503 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001504 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301505 struct sk_buff *skb;
1506 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301507 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001508 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301509 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301510 int i, flags = 0;
1511 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301512 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301513
1514 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301515
Sujitha22be222009-03-30 15:28:36 +05301516 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301517 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301518 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301519 hdr = (struct ieee80211_hdr *)skb->data;
1520 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301521
Sujithc89424d2009-01-30 14:29:28 +05301522 /*
1523 * Whether short preamble is needed for the CTS rate is
1524 * determined from the BSS's global flag.
1525 * For the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used instead.
1526 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001527 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1528 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301529 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001530 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001531
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001532 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001533 bool is_40, is_sgi, is_sp;
1534 int phy;
1535
Sujithe63835b2008-11-18 09:07:53 +05301536 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001537 continue;
1538
Sujitha8efee42008-11-18 09:07:30 +05301539 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301540 series[i].Tries = rates[i].count;
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001541 series[i].ChSel = common->tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001542
Felix Fietkau27032052010-01-17 21:08:50 +01001543 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1544 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301545 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001546 flags |= ATH9K_TXDESC_RTSENA;
1547 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1548 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1549 flags |= ATH9K_TXDESC_CTSENA;
1550 }
1551
Sujithc89424d2009-01-30 14:29:28 +05301552 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1553 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1554 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1555 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001556
Felix Fietkau545750d2009-11-23 22:21:01 +01001557 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1558 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1559 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1560
1561 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1562 /* MCS rates */
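			/* the hardware rate code for HT is the MCS index with bit 7 set (cf. IS_HT_RATE) */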
1563 series[i].Rate = rix | 0x80;
1564 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1565 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001566 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1567 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001568 continue;
1569 }
1570
1571 /* legacy rates */
1572 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1573 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1574 !(rate->flags & IEEE80211_RATE_ERP_G))
1575 phy = WLAN_RC_PHY_CCK;
1576 else
1577 phy = WLAN_RC_PHY_OFDM;
1578
1579 series[i].Rate = rate->hw_value;
1580 if (rate->hw_value_short) {
1581 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1582 series[i].Rate |= rate->hw_value_short;
1583 } else {
1584 is_sp = false;
1585 }
1586
1587 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1588 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001589 }
1590
Felix Fietkau27032052010-01-17 21:08:50 +01001591 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1592 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1593 flags &= ~ATH9K_TXDESC_RTSENA;
1594
1595 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1596 if (flags & ATH9K_TXDESC_RTSENA)
1597 flags &= ~ATH9K_TXDESC_CTSENA;
1598
Sujithe63835b2008-11-18 09:07:53 +05301599 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301600 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1601 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301602 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301603 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301604
Sujith17d79042009-02-09 13:27:03 +05301605 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301606 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001607}
1608
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001609static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
Sujithe8324352009-01-16 21:38:42 +05301610 struct sk_buff *skb,
1611 struct ath_tx_control *txctl)
1612{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001613 struct ath_wiphy *aphy = hw->priv;
1614 struct ath_softc *sc = aphy->sc;
Sujithe8324352009-01-16 21:38:42 +05301615 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1616 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301617 int hdrlen;
1618 __le16 fc;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001619 int padpos, padsize;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001620 bool use_ldpc = false;
Sujithe8324352009-01-16 21:38:42 +05301621
Felix Fietkau827e69b2009-11-15 23:09:25 +01001622 tx_info->pad[0] = 0;
1623 switch (txctl->frame_type) {
Pavel Roskinc81494d2010-03-31 18:05:25 -04001624 case ATH9K_IFT_NOT_INTERNAL:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001625 break;
Pavel Roskinc81494d2010-03-31 18:05:25 -04001626 case ATH9K_IFT_PAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001627 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1628 /* fall through */
Pavel Roskinc81494d2010-03-31 18:05:25 -04001629 case ATH9K_IFT_UNPAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001630 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1631 break;
1632 }
Sujithe8324352009-01-16 21:38:42 +05301633 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1634 fc = hdr->frame_control;
1635
1636 ATH_TXBUF_RESET(bf);
1637
Felix Fietkau827e69b2009-11-15 23:09:25 +01001638 bf->aphy = aphy;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001639 bf->bf_frmlen = skb->len + FCS_LEN;
1640 /* Remove the padding size from bf_frmlen, if any */
1641 padpos = ath9k_cmn_padpos(hdr->frame_control);
1642 padsize = padpos & 3;
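	/* the 802.11 header is padded out to a 4-byte boundary before the body;
	 * those pad bytes are not counted in the on-air frame length */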
1643 if (padsize && skb->len > padpos + padsize) {
1644 bf->bf_frmlen -= padsize;
1645 }
Sujithe8324352009-01-16 21:38:42 +05301646
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001647 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
Sujithc656bbb2009-01-16 21:38:56 +05301648 bf->bf_state.bf_type |= BUF_HT;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001649 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1650 use_ldpc = true;
1651 }
Sujithe8324352009-01-16 21:38:42 +05301652
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001653 bf->bf_state.bfs_paprd = txctl->paprd;
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001654 if (txctl->paprd)
1655 bf->bf_state.bfs_paprd_timestamp = jiffies;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001656 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
Sujithe8324352009-01-16 21:38:42 +05301657
1658 bf->bf_keytype = get_hw_crypto_keytype(skb);
Sujithe8324352009-01-16 21:38:42 +05301659 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1660 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1661 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1662 } else {
1663 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1664 }
1665
Sujith17b182e2009-12-14 14:56:56 +05301666 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1667 (sc->sc_flags & SC_OP_TXAGGR))
Sujithe8324352009-01-16 21:38:42 +05301668 assign_aggr_tid_seqno(skb, bf);
1669
1670 bf->bf_mpdu = skb;
1671
1672 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
1673 skb->len, DMA_TO_DEVICE);
1674 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
1675 bf->bf_mpdu = NULL;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001676 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1677 "dma_mapping_error() on TX\n");
Sujithe8324352009-01-16 21:38:42 +05301678 return -ENOMEM;
1679 }
1680
1681 bf->bf_buf_addr = bf->bf_dmacontext;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001682
1683 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1684 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1685 bf->bf_isnullfunc = true;
Sujith1b04b932010-01-08 10:36:05 +05301686 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001687 } else
1688 bf->bf_isnullfunc = false;
1689
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001690 bf->bf_tx_aborted = false;
1691
Sujithe8324352009-01-16 21:38:42 +05301692 return 0;
1693}
1694
1695/* FIXME: tx power */
1696static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1697 struct ath_tx_control *txctl)
1698{
Sujitha22be222009-03-30 15:28:36 +05301699 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301700 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithc37452b2009-03-09 09:31:57 +05301701 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301702 struct ath_node *an = NULL;
1703 struct list_head bf_head;
1704 struct ath_desc *ds;
1705 struct ath_atx_tid *tid;
Sujithcbe61d82009-02-09 13:27:12 +05301706 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301707 int frm_type;
Sujithc37452b2009-03-09 09:31:57 +05301708 __le16 fc;
Sujithe8324352009-01-16 21:38:42 +05301709
1710 frm_type = get_hw_packet_type(skb);
Sujithc37452b2009-03-09 09:31:57 +05301711 fc = hdr->frame_control;
Sujithe8324352009-01-16 21:38:42 +05301712
1713 INIT_LIST_HEAD(&bf_head);
1714 list_add_tail(&bf->list, &bf_head);
1715
1716 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001717 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301718
1719 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1720 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1721
1722 ath9k_hw_filltxdesc(ah, ds,
1723 skb->len, /* segment length */
1724 true, /* first segment */
1725 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001726 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001727 bf->bf_buf_addr,
1728 txctl->txq->axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301729
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001730 if (bf->bf_state.bfs_paprd)
1731 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1732
Sujithe8324352009-01-16 21:38:42 +05301733 spin_lock_bh(&txctl->txq->axq_lock);
1734
1735 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1736 tx_info->control.sta) {
1737 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1738 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1739
Sujithc37452b2009-03-09 09:31:57 +05301740 if (!ieee80211_is_data_qos(fc)) {
1741 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1742 goto tx_done;
1743 }
1744
Felix Fietkau4fdec032010-03-12 04:02:43 +01001745 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Sujithe8324352009-01-16 21:38:42 +05301746 /*
1747 * Try aggregation if it's a unicast data frame
1748 * and the destination is HT capable.
1749 */
1750 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1751 } else {
1752 /*
1753 * Send this frame as regular when ADDBA
1754 * exchange is neither complete nor pending.
1755 */
Sujithc37452b2009-03-09 09:31:57 +05301756 ath_tx_send_ht_normal(sc, txctl->txq,
1757 tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301758 }
1759 } else {
Sujithc37452b2009-03-09 09:31:57 +05301760 ath_tx_send_normal(sc, txctl->txq, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301761 }
1762
Sujithc37452b2009-03-09 09:31:57 +05301763tx_done:
Sujithe8324352009-01-16 21:38:42 +05301764 spin_unlock_bh(&txctl->txq->axq_lock);
1765}
1766
1767/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001768int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301769 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001770{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001771 struct ath_wiphy *aphy = hw->priv;
1772 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001773 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau84642d62010-06-01 21:33:13 +02001774 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001775 struct ath_buf *bf;
Felix Fietkau97923b12010-06-12 00:33:55 -04001776 int q, r;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001777
Sujithe8324352009-01-16 21:38:42 +05301778 bf = ath_tx_get_buffer(sc);
1779 if (!bf) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001780 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
Sujithe8324352009-01-16 21:38:42 +05301781 return -1;
1782 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001783
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001784 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301785 if (unlikely(r)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001786 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001787
Sujithe8324352009-01-16 21:38:42 +05301788 /* upon ath_tx_processq() this TX queue will be resumed; we
1789 * guarantee this will happen because we know beforehand that
1790 * TX completion will still have to run on at least one buffer
1791 * on the queue */
1792 spin_lock_bh(&txq->axq_lock);
Felix Fietkau84642d62010-06-01 21:33:13 +02001793 if (!txq->stopped && txq->axq_depth > 1) {
Luis R. Rodriguezf52de032009-11-02 17:09:12 -08001794 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
Sujithe8324352009-01-16 21:38:42 +05301795 txq->stopped = 1;
1796 }
1797 spin_unlock_bh(&txq->axq_lock);
1798
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001799 ath_tx_return_buffer(sc, bf);
Sujithe8324352009-01-16 21:38:42 +05301800
1801 return r;
1802 }
1803
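	/* pending_frames[] tracks only the four data queues; anything else is accounted to queue 0 */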
Felix Fietkau97923b12010-06-12 00:33:55 -04001804 q = skb_get_queue_mapping(skb);
1805 if (q >= 4)
1806 q = 0;
1807
1808 spin_lock_bh(&txq->axq_lock);
1809 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1810 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1811 txq->stopped = 1;
1812 }
1813 spin_unlock_bh(&txq->axq_lock);
1814
Sujithe8324352009-01-16 21:38:42 +05301815 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001816
1817 return 0;
1818}
1819
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001820void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001821{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001822 struct ath_wiphy *aphy = hw->priv;
1823 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001824 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001825 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1826 int padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301827 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1828 struct ath_tx_control txctl;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001829
Sujithe8324352009-01-16 21:38:42 +05301830 memset(&txctl, 0, sizeof(struct ath_tx_control));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001831
Sujithe8324352009-01-16 21:38:42 +05301832 /*
1833 * As a temporary workaround, assign seq# here; this will likely need
1834 * to be cleaned up to work better with Beacon transmission and virtual
1835 * BSSes.
1836 */
1837 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
Sujithe8324352009-01-16 21:38:42 +05301838 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1839 sc->tx.seq_no += 0x10;
1840 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1841 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001842 }
1843
Sujithe8324352009-01-16 21:38:42 +05301844 /* Add the padding after the header if this is not already done */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001845 padpos = ath9k_cmn_padpos(hdr->frame_control);
1846 padsize = padpos & 3;
1847 if (padsize && skb->len > padpos) {
Sujithe8324352009-01-16 21:38:42 +05301848 if (skb_headroom(skb) < padsize) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001849 ath_print(common, ATH_DBG_XMIT,
1850 "TX CABQ padding failed\n");
Sujithe8324352009-01-16 21:38:42 +05301851 dev_kfree_skb_any(skb);
1852 return;
1853 }
1854 skb_push(skb, padsize);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001855 memmove(skb->data, skb->data + padsize, padpos);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001856 }
1857
Sujithe8324352009-01-16 21:38:42 +05301858 txctl.txq = sc->beacon.cabq;
1859
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001860 ath_print(common, ATH_DBG_XMIT,
1861 "transmitting CABQ packet, skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301862
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001863 if (ath_tx_start(hw, skb, &txctl) != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001864 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
Sujithe8324352009-01-16 21:38:42 +05301865 goto exit;
1866 }
1867
1868 return;
1869exit:
1870 dev_kfree_skb_any(skb);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001871}
1872
Sujithe8324352009-01-16 21:38:42 +05301873/*****************/
1874/* TX Completion */
1875/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001876
Sujithe8324352009-01-16 21:38:42 +05301877static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau827e69b2009-11-15 23:09:25 +01001878 struct ath_wiphy *aphy, int tx_flags)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001879{
Sujithe8324352009-01-16 21:38:42 +05301880 struct ieee80211_hw *hw = sc->hw;
1881 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001882 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001883 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001884 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301885
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001886 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301887
Felix Fietkau827e69b2009-11-15 23:09:25 +01001888 if (aphy)
1889 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301890
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301891 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301892 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301893
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301894 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301895 /* Frame was ACKed */
1896 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1897 }
1898
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001899 padpos = ath9k_cmn_padpos(hdr->frame_control);
1900 padsize = padpos & 3;
1901 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301902 /*
1903 * Remove MAC header padding before giving the frame back to
1904 * mac80211.
1905 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001906 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301907 skb_pull(skb, padsize);
1908 }
1909
Sujith1b04b932010-01-08 10:36:05 +05301910 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1911 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001912 ath_print(common, ATH_DBG_PS,
1913 "Going back to sleep after having "
Pavel Roskinf643e512010-01-29 17:22:12 -05001914 "received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301915 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1916 PS_WAIT_FOR_CAB |
1917 PS_WAIT_FOR_PSPOLL_DATA |
1918 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001919 }
1920
Felix Fietkau827e69b2009-11-15 23:09:25 +01001921 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
Jouni Malinenf0ed85c2009-03-03 19:23:31 +02001922 ath9k_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001923 else {
1924 q = skb_get_queue_mapping(skb);
1925 if (q >= 4)
1926 q = 0;
1927
1928 if (--sc->tx.pending_frames[q] < 0)
1929 sc->tx.pending_frames[q] = 0;
1930
Felix Fietkau827e69b2009-11-15 23:09:25 +01001931 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001932 }
Sujithe8324352009-01-16 21:38:42 +05301933}
1934
1935static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001936 struct ath_txq *txq, struct list_head *bf_q,
1937 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301938{
1939 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301940 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301941 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301942
Sujithe8324352009-01-16 21:38:42 +05301943 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301944 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301945
1946 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301947 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301948
1949 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301950 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301951 }
1952
1953 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001954
1955 if (bf->bf_state.bfs_paprd) {
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001956 if (time_after(jiffies,
1957 bf->bf_state.bfs_paprd_timestamp +
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001958 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001959 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001960 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001961 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001962 } else {
1963 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1964 ath_debug_stat_tx(sc, txq, bf, ts);
1965 }
Sujithe8324352009-01-16 21:38:42 +05301966
1967 /*
1968 * Return this mpdu's list of ath_bufs to the free queue
1969 */
1970 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1971 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1972 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1973}
1974
1975static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001976 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301977{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001978 u16 seq_st = 0;
1979 u32 ba[WME_BA_BMP_SIZE >> 5];
Sujithe8324352009-01-16 21:38:42 +05301980 int ba_index;
1981 int nbad = 0;
1982 int isaggr = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001983
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001984 if (bf->bf_lastbf->bf_tx_aborted)
Sujithe8324352009-01-16 21:38:42 +05301985 return 0;
Sujith528f0c62008-10-29 10:14:26 +05301986
Sujithcd3d39a2008-08-11 14:03:34 +05301987 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001988 if (isaggr) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001989 seq_st = ts->ts_seqnum;
1990 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001991 }
1992
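	/* walk the chain of subframes; a subframe is bad if the frame failed
	 * or it is missing from the block-ack bitmap */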
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001993 while (bf) {
Sujithe8324352009-01-16 21:38:42 +05301994 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1995 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1996 nbad++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001997
Sujithe8324352009-01-16 21:38:42 +05301998 bf = bf->bf_next;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001999 }
2000
Sujithe8324352009-01-16 21:38:42 +05302001 return nbad;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002002}
2003
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002004static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302005 int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302006{
Sujitha22be222009-03-30 15:28:36 +05302007 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302008 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302009 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01002010 struct ieee80211_hw *hw = bf->aphy->hw;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302011 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302012
Sujith95e4acb2009-03-13 08:56:09 +05302013 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002014 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302015
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002016 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302017 WARN_ON(tx_rateindex >= hw->max_rates);
2018
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002019 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302020 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Felix Fietkaud9698472010-03-01 13:32:11 +01002021 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
2022 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302023
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002024 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302025 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Sujith254ad0f2009-02-04 08:10:19 +05302026 if (ieee80211_is_data(hdr->frame_control)) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002027 if (ts->ts_flags &
Felix Fietkau827e69b2009-11-15 23:09:25 +01002028 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2029 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002030 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2031 (ts->ts_status & ATH9K_TXERR_FIFO))
Felix Fietkau827e69b2009-11-15 23:09:25 +01002032 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2033 tx_info->status.ampdu_len = bf->bf_nframes;
2034 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
Sujithc4288392008-11-18 09:09:30 +05302035 }
2036 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302037
Felix Fietkau545750d2009-11-23 22:21:01 +01002038 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302039 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002040 tx_info->status.rates[i].idx = -1;
2041 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302042
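	/* the final rate was used once plus ts_longretry hardware retries */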
Felix Fietkau78c46532010-06-25 01:26:16 +02002043 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302044}
2045
Sujith059d8062009-01-16 21:38:49 +05302046static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2047{
2048 int qnum;
2049
Felix Fietkau97923b12010-06-12 00:33:55 -04002050 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2051 if (qnum == -1)
2052 return;
2053
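	/* wake the mac80211 queue only once the pending frame count has dropped back below ATH_MAX_QDEPTH */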
Sujith059d8062009-01-16 21:38:49 +05302054 spin_lock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002055 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
2056 ath_mac80211_start_queue(sc, qnum);
2057 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05302058 }
2059 spin_unlock_bh(&txq->axq_lock);
2060}
2061
Sujithc4288392008-11-18 09:09:30 +05302062static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002063{
Sujithcbe61d82009-02-09 13:27:12 +05302064 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002065 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002066 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2067 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302068 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002069 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302070 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002071 int status;
2072
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002073 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2074 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2075 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002076
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002077 for (;;) {
2078 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002079 if (list_empty(&txq->axq_q)) {
2080 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002081 spin_unlock_bh(&txq->axq_lock);
2082 break;
2083 }
2084 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2085
2086 /*
2087 * There is a race condition in which a BH gets scheduled
2088 * after software writes TxE and before the hardware re-loads
2089 * the last descriptor to pick up the newly chained one.
2090 * Software must keep the last DONE descriptor as a
2091 * holding descriptor - software does so by marking
2092 * it with the STALE flag.
2093 */
2094 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302095 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002096 bf_held = bf;
2097 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302098 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002099 break;
2100 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002101 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302102 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002103 }
2104 }
2105
2106 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302107 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002108
Felix Fietkau29bffa92010-03-29 20:14:23 -07002109 memset(&ts, 0, sizeof(ts));
2110 status = ath9k_hw_txprocdesc(ah, ds, &ts);
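		/* -EINPROGRESS means the hardware has not finished with this descriptor yet */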
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002111 if (status == -EINPROGRESS) {
2112 spin_unlock_bh(&txq->axq_lock);
2113 break;
2114 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002115
2116 /*
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002117 * We now know the nullfunc frame has been ACKed so we
2118 * can disable RX.
2119 */
2120 if (bf->bf_isnullfunc &&
Felix Fietkau29bffa92010-03-29 20:14:23 -07002121 (ts.ts_status & ATH9K_TX_ACKED)) {
Senthil Balasubramanian3f7c5c12010-02-03 22:51:13 +05302122 if ((sc->ps_flags & PS_ENABLED))
2123 ath9k_enable_ps(sc);
2124 else
Sujith1b04b932010-01-08 10:36:05 +05302125 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002126 }
2127
2128 /*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002129 * Remove ath_buf's of the same transmit unit from txq,
2130 * however leave the last descriptor back as the holding
2131 * descriptor for hw.
2132 */
Sujitha119cc42009-03-30 15:28:38 +05302133 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002134 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002135 if (!list_is_singular(&lastbf->list))
2136 list_cut_position(&bf_head,
2137 &txq->axq_q, lastbf->list.prev);
2138
2139 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002140 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002141 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002142 if (bf_held)
2143 list_del(&bf_held->list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002144 spin_unlock_bh(&txq->axq_lock);
2145
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002146 if (bf_held)
2147 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002148
Sujithcd3d39a2008-08-11 14:03:34 +05302149 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002150 /*
2151 * This frame was sent out as a single (non-aggregate) frame,
2152 * so use the hardware retry status directly.
2153 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002154 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302155 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002156 ath_tx_rc_status(bf, &ts, 0, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002157 }
Johannes Berge6a98542008-10-21 12:40:02 +02002158
Sujithcd3d39a2008-08-11 14:03:34 +05302159 if (bf_isampdu(bf))
Felix Fietkau29bffa92010-03-29 20:14:23 -07002160 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002161 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002162 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002163
Sujith059d8062009-01-16 21:38:49 +05302164 ath_wake_mac80211_queue(sc, txq);
2165
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002166 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302167 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002168 ath_txq_schedule(sc, txq);
2169 spin_unlock_bh(&txq->axq_lock);
2170 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002171}
2172
Sujith305fe472009-07-23 15:32:29 +05302173static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002174{
2175 struct ath_softc *sc = container_of(work, struct ath_softc,
2176 tx_complete_work.work);
2177 struct ath_txq *txq;
2178 int i;
2179 bool needreset = false;
2180
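	/* a queue that still holds frames and was already flagged in-progress
	 * on the previous poll is treated as hung */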
2181 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2182 if (ATH_TXQ_SETUP(sc, i)) {
2183 txq = &sc->tx.txq[i];
2184 spin_lock_bh(&txq->axq_lock);
2185 if (txq->axq_depth) {
2186 if (txq->axq_tx_inprogress) {
2187 needreset = true;
2188 spin_unlock_bh(&txq->axq_lock);
2189 break;
2190 } else {
2191 txq->axq_tx_inprogress = true;
2192 }
2193 }
2194 spin_unlock_bh(&txq->axq_lock);
2195 }
2196
2197 if (needreset) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002198 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2199 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302200 ath9k_ps_wakeup(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002201 ath_reset(sc, false);
Sujith332c5562009-10-09 09:51:28 +05302202 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002203 }
2204
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002205 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002206 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2207}
2208
2209
Sujithe8324352009-01-16 21:38:42 +05302210
2211void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002212{
Sujithe8324352009-01-16 21:38:42 +05302213 int i;
2214 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002215
Sujithe8324352009-01-16 21:38:42 +05302216 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002217
2218 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302219 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2220 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002221 }
2222}
2223
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002224void ath_tx_edma_tasklet(struct ath_softc *sc)
2225{
2226 struct ath_tx_status txs;
2227 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2228 struct ath_hw *ah = sc->sc_ah;
2229 struct ath_txq *txq;
2230 struct ath_buf *bf, *lastbf;
2231 struct list_head bf_head;
2232 int status;
2233 int txok;
2234
2235 for (;;) {
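		/*
		 * On EDMA (AR9003-family) hardware the TX status is reported
		 * through a separate status ring, so no frame descriptor is
		 * passed to ath9k_hw_txprocdesc() here.
		 */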
2236 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2237 if (status == -EINPROGRESS)
2238 break;
2239 if (status == -EIO) {
2240 ath_print(common, ATH_DBG_XMIT,
2241 "Error processing tx status\n");
2242 break;
2243 }
2244
2245 /* Skip beacon completions */
2246 if (txs.qid == sc->beacon.beaconq)
2247 continue;
2248
2249 txq = &sc->tx.txq[txs.qid];
2250
2251 spin_lock_bh(&txq->axq_lock);
2252 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2253 spin_unlock_bh(&txq->axq_lock);
2254 return;
2255 }
2256
2257 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2258 struct ath_buf, list);
2259 lastbf = bf->bf_lastbf;
2260
2261 INIT_LIST_HEAD(&bf_head);
2262 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2263 &lastbf->list);
2264 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2265 txq->axq_depth--;
2266 txq->axq_tx_inprogress = false;
2267 spin_unlock_bh(&txq->axq_lock);
2268
2269 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2270
Vasanthakumar Thiagarajande0f6482010-05-17 18:57:54 -07002271 /*
2272 * Make sure the nullfunc frame is ACKed before configuring
2273 * the hardware into PS mode.
2274 */
2275 if (bf->bf_isnullfunc && txok) {
2276 if ((sc->ps_flags & PS_ENABLED))
2277 ath9k_enable_ps(sc);
2278 else
2279 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2280 }
2281
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002282 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002283 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2284 bf->bf_state.bf_type |= BUF_XRETRY;
2285 ath_tx_rc_status(bf, &txs, 0, txok, true);
2286 }
2287
2288 if (bf_isampdu(bf))
2289 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2290 else
2291 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2292 &txs, txok, 0);
2293
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002294 ath_wake_mac80211_queue(sc, txq);
2295
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002296 spin_lock_bh(&txq->axq_lock);
2297 if (!list_empty(&txq->txq_fifo_pending)) {
2298 INIT_LIST_HEAD(&bf_head);
2299 bf = list_first_entry(&txq->txq_fifo_pending,
2300 struct ath_buf, list);
2301 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2302 &bf->bf_lastbf->list);
2303 ath_tx_txqaddbuf(sc, txq, &bf_head);
2304 } else if (sc->sc_flags & SC_OP_TXAGGR)
2305 ath_txq_schedule(sc, txq);
2306 spin_unlock_bh(&txq->axq_lock);
2307 }
2308}
2309
Sujithe8324352009-01-16 21:38:42 +05302310/*****************/
2311/* Init, Cleanup */
2312/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002313
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002314static int ath_txstatus_setup(struct ath_softc *sc, int size)
2315{
2316 struct ath_descdma *dd = &sc->txsdma;
2317 u8 txs_len = sc->sc_ah->caps.txs_len;
2318
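	/* the TX status ring holds 'size' entries of txs_len bytes each */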
2319 dd->dd_desc_len = size * txs_len;
2320 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2321 &dd->dd_desc_paddr, GFP_KERNEL);
2322 if (!dd->dd_desc)
2323 return -ENOMEM;
2324
2325 return 0;
2326}
2327
2328static int ath_tx_edma_init(struct ath_softc *sc)
2329{
2330 int err;
2331
2332 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2333 if (!err)
2334 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2335 sc->txsdma.dd_desc_paddr,
2336 ATH_TXSTATUS_RING_SIZE);
2337
2338 return err;
2339}
2340
2341static void ath_tx_edma_cleanup(struct ath_softc *sc)
2342{
2343 struct ath_descdma *dd = &sc->txsdma;
2344
2345 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2346 dd->dd_desc_paddr);
2347}
2348
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002349int ath_tx_init(struct ath_softc *sc, int nbufs)
2350{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002351 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002352 int error = 0;
2353
Sujith797fe5cb2009-03-30 15:28:45 +05302354 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002355
Sujith797fe5cb2009-03-30 15:28:45 +05302356 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002357 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302358 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002359 ath_print(common, ATH_DBG_FATAL,
2360 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302361 goto err;
2362 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002363
Sujith797fe5cb2009-03-30 15:28:45 +05302364 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002365 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302366 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002367 ath_print(common, ATH_DBG_FATAL,
2368 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302369 goto err;
2370 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002371
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002372 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2373
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002374 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2375 error = ath_tx_edma_init(sc);
2376 if (error)
2377 goto err;
2378 }
2379
Sujith797fe5cb2009-03-30 15:28:45 +05302380err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002381 if (error != 0)
2382 ath_tx_cleanup(sc);
2383
2384 return error;
2385}
2386
Sujith797fe5cb2009-03-30 15:28:45 +05302387void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002388{
Sujithb77f4832008-12-07 21:44:03 +05302389 if (sc->beacon.bdma.dd_desc_len != 0)
2390 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002391
Sujithb77f4832008-12-07 21:44:03 +05302392 if (sc->tx.txdma.dd_desc_len != 0)
2393 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002394
2395 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2396 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002397}
2398
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002399void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2400{
Sujithc5170162008-10-29 10:13:59 +05302401 struct ath_atx_tid *tid;
2402 struct ath_atx_ac *ac;
2403 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002404
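	/* reset per-TID aggregation state: sequence tracking, BAW bookkeeping and ADDBA flags */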
Sujith8ee5afb2008-12-07 21:43:36 +05302405 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302406 tidno < WME_NUM_TID;
2407 tidno++, tid++) {
2408 tid->an = an;
2409 tid->tidno = tidno;
2410 tid->seq_start = tid->seq_next = 0;
2411 tid->baw_size = WME_MAX_BA;
2412 tid->baw_head = tid->baw_tail = 0;
2413 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302414 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302415 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302416 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302417 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302418 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302419 tid->state &= ~AGGR_ADDBA_COMPLETE;
2420 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302421 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002422
Sujith8ee5afb2008-12-07 21:43:36 +05302423 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302424 acno < WME_NUM_AC; acno++, ac++) {
2425 ac->sched = false;
Felix Fietkau1d2231e2010-06-12 00:33:51 -04002426 ac->qnum = sc->tx.hwq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302427 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002428 }
2429}
2430
Sujithb5aa9bf2008-10-29 10:13:31 +05302431void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002432{
2433 int i;
2434 struct ath_atx_ac *ac, *ac_tmp;
2435 struct ath_atx_tid *tid, *tid_tmp;
2436 struct ath_txq *txq;
Sujithe8324352009-01-16 21:38:42 +05302437
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002438 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2439 if (ATH_TXQ_SETUP(sc, i)) {
Sujithb77f4832008-12-07 21:44:03 +05302440 txq = &sc->tx.txq[i];
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002441
Ming Leia9f042c2010-02-28 00:56:24 +08002442 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002443
2444 list_for_each_entry_safe(ac,
2445 ac_tmp, &txq->axq_acq, list) {
2446 tid = list_first_entry(&ac->tid_q,
2447 struct ath_atx_tid, list);
2448 if (tid && tid->an != an)
2449 continue;
2450 list_del(&ac->list);
2451 ac->sched = false;
2452
2453 list_for_each_entry_safe(tid,
2454 tid_tmp, &ac->tid_q, list) {
2455 list_del(&tid->list);
2456 tid->sched = false;
Sujithb5aa9bf2008-10-29 10:13:31 +05302457 ath_tid_drain(sc, txq, tid);
Sujitha37c2c72008-10-29 10:15:40 +05302458 tid->state &= ~AGGR_ADDBA_COMPLETE;
Sujitha37c2c72008-10-29 10:15:40 +05302459 tid->state &= ~AGGR_CLEANUP;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002460 }
2461 }
2462
Ming Leia9f042c2010-02-28 00:56:24 +08002463 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002464 }
2465 }
2466}