/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,  54 },		/*  0: BPSK */
	{    52, 108 },		/*  1: QPSK 1/2 */
	{    78, 162 },		/*  2: QPSK 3/4 */
	{   104, 216 },		/*  3: 16-QAM 1/2 */
	{   156, 324 },		/*  4: 16-QAM 3/4 */
	{   208, 432 },		/*  5: 64-QAM 2/3 */
	{   234, 486 },		/*  6: 64-QAM 3/4 */
	{   260, 540 },		/*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)	((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

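/*
 * Largest frame length (in bytes) that still fits into a 4 ms transmit
 * duration, indexed by [MCS mode][MCS index] and capped at 65532.
 * ath_lookup_rate() uses this table to bound the aggregate size.
 */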
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864,  19300,  25736,  28952,  32172,
		6424, 12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628, 19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296,  21444,  28596,  32172,  35744,
		7140, 14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

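/*
 * Add a TID to its access category's pending list (and the AC to the
 * hardware queue's list) unless the TID is paused or already scheduled.
 */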
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);
	tid->paused++;
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		BUG_ON(bf_isretried(bf));
		list_move_tail(&bf->list, &bf_head);
		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	BUG_ON(tid->tx_buf[cindex] != NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

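/* Take an ath_buf from the free tx buffer list, or return NULL if it is empty. */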
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

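/*
 * Handle completion of an aggregate: subframes acked in the block-ack
 * bitmap are completed, the rest are either software-retried or failed,
 * and the block-ack window is advanced accordingly.
 */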
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	unsigned long flags;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();

		spin_lock_irqsave(&sc->tx.txbuflock, flags);
		list_splice_tail_init(bf_q, &sc->tx.txbuf);
		spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

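/*
 * Derive the aggregate size limit (in bytes) for this frame's rate series
 * from the 4 ms frame length table and the peer's maximum A-MPDU length.
 * Returns 0 if the frame should not be aggregated at all.
 */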
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the rate selected is
	 * not a probe rate; if the rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates.
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

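/*
 * Pull frames from the TID's software queue and link them into a single
 * aggregate, stopping at the block-ack window, the aggregate byte limit
 * or the subframe limit.
 */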
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

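/* Build aggregates from a TID's software queue and hand them to the hardware queue. */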
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		       u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txtid->state |= AGGR_ADDBA_PROGRESS;
	ath_tx_pause_tid(sc, txtid);
	*ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_tx_status ts;
	struct ath_buf *bf;
	struct list_head bf_head;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: this is based on the assumption that a
			 * software-retried frame will always stay at
			 * the head of the software queue.
			 */
			break;
		}
		list_move_tail(&bf->list, &bf_head);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}
	spin_unlock_bh(&txq->axq_lock);

	if (txtid->baw_head != txtid->baw_tail) {
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		ath_tx_flush_tid(sc, txtid);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

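/* Allocate and initialize a hardware tx queue of the requested type/subtype. */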
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

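/*
 * Queue an A-MPDU subframe: send it to the hardware right away when the
 * block-ack window and queue depth allow it, otherwise keep it on the
 * TID's software queue for later aggregation.
 */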
Sujithe8324352009-01-16 21:38:42 +05301314static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1315 struct list_head *bf_head,
1316 struct ath_tx_control *txctl)
1317{
1318 struct ath_buf *bf;
1319
Sujithe8324352009-01-16 21:38:42 +05301320 bf = list_first_entry(bf_head, struct ath_buf, list);
1321 bf->bf_state.bf_type |= BUF_AMPDU;
Sujithfec247c2009-07-27 12:08:16 +05301322 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
Sujithe8324352009-01-16 21:38:42 +05301323
1324 /*
1325 * Do not queue to h/w when any of the following conditions is true:
1326 * - there are pending frames in software queue
1327 * - the TID is currently paused for ADDBA/BAR request
1328 * - seqno is not within block-ack window
1329 * - h/w queue depth exceeds low water mark
1330 */
1331 if (!list_empty(&tid->buf_q) || tid->paused ||
1332 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1333 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001334 /*
Sujithe8324352009-01-16 21:38:42 +05301335 * Add this frame to software queue for scheduling later
1336 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001337 */
Sujithd43f30152009-01-16 21:38:53 +05301338 list_move_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301339 ath_tx_queue_tid(txctl->txq, tid);
1340 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001341 }
1342
Sujithe8324352009-01-16 21:38:42 +05301343 /* Add sub-frame to BAW */
1344 ath_tx_addto_baw(sc, tid, bf);
1345
1346 /* Queue to h/w without aggregation */
1347 bf->bf_nframes = 1;
Sujithd43f30152009-01-16 21:38:53 +05301348 bf->bf_lastbf = bf;
Sujithe8324352009-01-16 21:38:42 +05301349 ath_buf_set_rate(sc, bf);
1350 ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
Sujithc4288392008-11-18 09:09:30 +05301351}
1352
Sujithc37452b2009-03-09 09:31:57 +05301353static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
1354 struct ath_atx_tid *tid,
1355 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001356{
Sujithe8324352009-01-16 21:38:42 +05301357 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001358
Sujithe8324352009-01-16 21:38:42 +05301359 bf = list_first_entry(bf_head, struct ath_buf, list);
1360 bf->bf_state.bf_type &= ~BUF_AMPDU;
1361
1362 /* update starting sequence number for subsequent ADDBA request */
1363 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1364
1365 bf->bf_nframes = 1;
Sujithd43f30152009-01-16 21:38:53 +05301366 bf->bf_lastbf = bf;
Sujithe8324352009-01-16 21:38:42 +05301367 ath_buf_set_rate(sc, bf);
1368 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301369 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001370}
1371
Sujithc37452b2009-03-09 09:31:57 +05301372static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1373 struct list_head *bf_head)
1374{
1375 struct ath_buf *bf;
1376
1377 bf = list_first_entry(bf_head, struct ath_buf, list);
1378
1379 bf->bf_lastbf = bf;
1380 bf->bf_nframes = 1;
1381 ath_buf_set_rate(sc, bf);
1382 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301383 TX_STAT_INC(txq->axq_qnum, queued);
Sujithc37452b2009-03-09 09:31:57 +05301384}
1385
Sujith528f0c62008-10-29 10:14:26 +05301386static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001387{
Sujith528f0c62008-10-29 10:14:26 +05301388 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001389 enum ath9k_pkt_type htype;
1390 __le16 fc;
1391
Sujith528f0c62008-10-29 10:14:26 +05301392 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001393 fc = hdr->frame_control;
1394
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001395 if (ieee80211_is_beacon(fc))
1396 htype = ATH9K_PKT_TYPE_BEACON;
1397 else if (ieee80211_is_probe_resp(fc))
1398 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1399 else if (ieee80211_is_atim(fc))
1400 htype = ATH9K_PKT_TYPE_ATIM;
1401 else if (ieee80211_is_pspoll(fc))
1402 htype = ATH9K_PKT_TYPE_PSPOLL;
1403 else
1404 htype = ATH9K_PKT_TYPE_NORMAL;
1405
1406 return htype;
1407}
1408
Sujith528f0c62008-10-29 10:14:26 +05301409static int get_hw_crypto_keytype(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001410{
Sujith528f0c62008-10-29 10:14:26 +05301411 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1412
1413 if (tx_info->control.hw_key) {
1414 if (tx_info->control.hw_key->alg == ALG_WEP)
1415 return ATH9K_KEY_TYPE_WEP;
1416 else if (tx_info->control.hw_key->alg == ALG_TKIP)
1417 return ATH9K_KEY_TYPE_TKIP;
1418 else if (tx_info->control.hw_key->alg == ALG_CCMP)
1419 return ATH9K_KEY_TYPE_AES;
1420 }
1421
1422 return ATH9K_KEY_TYPE_CLEAR;
1423}
1424
Sujith528f0c62008-10-29 10:14:26 +05301425static void assign_aggr_tid_seqno(struct sk_buff *skb,
1426 struct ath_buf *bf)
1427{
1428 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1429 struct ieee80211_hdr *hdr;
1430 struct ath_node *an;
1431 struct ath_atx_tid *tid;
1432 __le16 fc;
1433 u8 *qc;
1434
1435 if (!tx_info->control.sta)
1436 return;
1437
1438 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1439 hdr = (struct ieee80211_hdr *)skb->data;
1440 fc = hdr->frame_control;
1441
Sujith528f0c62008-10-29 10:14:26 +05301442 if (ieee80211_is_data_qos(fc)) {
1443 qc = ieee80211_get_qos_ctl(hdr);
1444 bf->bf_tidno = qc[0] & 0xf;
Sujith98deeea2008-08-11 14:05:46 +05301445 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001446
Sujithe8324352009-01-16 21:38:42 +05301447 /*
1448 * For HT capable stations, we save tidno for later use.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301449 * We also override the seqno set by the upper layer with the one
 1450 * tracked in the TX aggregation state.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301451 */
1452 tid = ATH_AN_2_TID(an, bf->bf_tidno);
Sujith17b182e2009-12-14 14:56:56 +05301453 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301454 bf->bf_seqno = tid->seq_next;
1455 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
Sujith528f0c62008-10-29 10:14:26 +05301456}
1457
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001458static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
Sujith528f0c62008-10-29 10:14:26 +05301459{
1460 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1461 int flags = 0;
1462
1463 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1464 flags |= ATH9K_TXDESC_INTREQ;
1465
1466 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1467 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301468
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001469 if (use_ldpc)
1470 flags |= ATH9K_TXDESC_LDPC;
1471
Sujith528f0c62008-10-29 10:14:26 +05301472 return flags;
1473}
1474
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001475/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001476 * rix - rate index
1477 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1478 * width - 0 for 20 MHz, 1 for 40 MHz
 1479 * half_gi - use the 3.6 us (half GI) symbol time instead of the full 4 us
1480 */
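/*
 * Illustrative example (assumed values, not from the original code):
 * a 1500-byte single-stream MPDU at MCS 7, 40 MHz, full GI gives
 * nbits = 1500 * 8 + 22 = 12022, nsymbits = 540, nsymbols = 23,
 * i.e. 23 * 4 us = 92 us of data plus 36 us of training/signal fields.
 */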
Sujith102e0572008-10-29 10:15:16 +05301481static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1482 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001483{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001484 u32 nbits, nsymbits, duration, nsymbols;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001485 int streams, pktlen;
1486
Sujithcd3d39a2008-08-11 14:03:34 +05301487 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +05301488
1489 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001490 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001491 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001492 nsymbits = bits_per_symbol[rix % 8][width] * streams;
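	/* round up: a partial symbol still takes a full symbol time */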
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001493 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1494
1495 if (!half_gi)
1496 duration = SYMBOL_TIME(nsymbols);
1497 else
1498 duration = SYMBOL_TIME_HALFGI(nsymbols);
1499
Sujithe63835b2008-11-18 09:07:53 +05301500	/* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001501 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301502
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001503 return duration;
1504}
1505
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001506static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1507{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001508 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001509 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301510 struct sk_buff *skb;
1511 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301512 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001513 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301514 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301515 int i, flags = 0;
1516 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301517 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301518
1519 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301520
Sujitha22be222009-03-30 15:28:36 +05301521 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301522 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301523 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301524 hdr = (struct ieee80211_hdr *)skb->data;
1525 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301526
Sujithc89424d2009-01-30 14:29:28 +05301527 /*
1528 * We check if Short Preamble is needed for the CTS rate by
1529 * checking the BSS's global flag.
1530 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1531 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001532 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1533 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301534 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001535 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001536
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001537 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001538 bool is_40, is_sgi, is_sp;
1539 int phy;
1540
Sujithe63835b2008-11-18 09:07:53 +05301541 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001542 continue;
1543
Sujitha8efee42008-11-18 09:07:30 +05301544 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301545 series[i].Tries = rates[i].count;
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001546 series[i].ChSel = common->tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001547
Felix Fietkau27032052010-01-17 21:08:50 +01001548 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1549 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301550 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001551 flags |= ATH9K_TXDESC_RTSENA;
1552 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1553 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1554 flags |= ATH9K_TXDESC_CTSENA;
1555 }
1556
Sujithc89424d2009-01-30 14:29:28 +05301557 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1558 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1559 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1560 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001561
Felix Fietkau545750d2009-11-23 22:21:01 +01001562 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1563 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1564 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1565
1566 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1567 /* MCS rates */
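			/* bit 7 (0x80) marks the rate as an HT MCS index for the hw */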
1568 series[i].Rate = rix | 0x80;
1569 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1570 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001571 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1572 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001573 continue;
1574 }
1575
 1576		/* legacy rates */
 1577		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
 1578		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
 1579		    !(rate->flags & IEEE80211_RATE_ERP_G))
 1580			phy = WLAN_RC_PHY_CCK;
 1581		else
 1582			phy = WLAN_RC_PHY_OFDM;
 1583
1584 series[i].Rate = rate->hw_value;
1585 if (rate->hw_value_short) {
1586 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1587 series[i].Rate |= rate->hw_value_short;
1588 } else {
1589 is_sp = false;
1590 }
1591
1592 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1593 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001594 }
1595
Felix Fietkau27032052010-01-17 21:08:50 +01001596 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1597 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1598 flags &= ~ATH9K_TXDESC_RTSENA;
1599
1600 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1601 if (flags & ATH9K_TXDESC_RTSENA)
1602 flags &= ~ATH9K_TXDESC_CTSENA;
1603
Sujithe63835b2008-11-18 09:07:53 +05301604 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301605 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1606 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301607 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301608 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301609
Sujith17d79042009-02-09 13:27:03 +05301610 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301611 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001612}
1613
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001614static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
Sujithe8324352009-01-16 21:38:42 +05301615 struct sk_buff *skb,
1616 struct ath_tx_control *txctl)
1617{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001618 struct ath_wiphy *aphy = hw->priv;
1619 struct ath_softc *sc = aphy->sc;
Sujithe8324352009-01-16 21:38:42 +05301620 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1621 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301622 int hdrlen;
1623 __le16 fc;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001624 int padpos, padsize;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001625 bool use_ldpc = false;
Sujithe8324352009-01-16 21:38:42 +05301626
Felix Fietkau827e69b2009-11-15 23:09:25 +01001627 tx_info->pad[0] = 0;
1628 switch (txctl->frame_type) {
Pavel Roskinc81494d2010-03-31 18:05:25 -04001629 case ATH9K_IFT_NOT_INTERNAL:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001630 break;
Pavel Roskinc81494d2010-03-31 18:05:25 -04001631 case ATH9K_IFT_PAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001632 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1633 /* fall through */
Pavel Roskinc81494d2010-03-31 18:05:25 -04001634 case ATH9K_IFT_UNPAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001635 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1636 break;
1637 }
Sujithe8324352009-01-16 21:38:42 +05301638 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1639 fc = hdr->frame_control;
1640
1641 ATH_TXBUF_RESET(bf);
1642
Felix Fietkau827e69b2009-11-15 23:09:25 +01001643 bf->aphy = aphy;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001644 bf->bf_frmlen = skb->len + FCS_LEN;
1645 /* Remove the padding size from bf_frmlen, if any */
1646 padpos = ath9k_cmn_padpos(hdr->frame_control);
1647 padsize = padpos & 3;
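	/* e.g. a 26-byte QoS data header gives padpos = 26 and padsize = 2 */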
 1648	if (padsize && skb->len > padpos + padsize) {
1649 bf->bf_frmlen -= padsize;
1650 }
Sujithe8324352009-01-16 21:38:42 +05301651
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001652 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
Sujithc656bbb2009-01-16 21:38:56 +05301653 bf->bf_state.bf_type |= BUF_HT;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001654 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1655 use_ldpc = true;
1656 }
Sujithe8324352009-01-16 21:38:42 +05301657
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001658 bf->bf_state.bfs_paprd = txctl->paprd;
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001659 if (txctl->paprd)
1660 bf->bf_state.bfs_paprd_timestamp = jiffies;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001661 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
Sujithe8324352009-01-16 21:38:42 +05301662
1663 bf->bf_keytype = get_hw_crypto_keytype(skb);
Sujithe8324352009-01-16 21:38:42 +05301664 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1665 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1666 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1667 } else {
1668 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1669 }
1670
Sujith17b182e2009-12-14 14:56:56 +05301671 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1672 (sc->sc_flags & SC_OP_TXAGGR))
Sujithe8324352009-01-16 21:38:42 +05301673 assign_aggr_tid_seqno(skb, bf);
1674
1675 bf->bf_mpdu = skb;
1676
1677 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
1678 skb->len, DMA_TO_DEVICE);
1679 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
1680 bf->bf_mpdu = NULL;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001681 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1682 "dma_mapping_error() on TX\n");
Sujithe8324352009-01-16 21:38:42 +05301683 return -ENOMEM;
1684 }
1685
1686 bf->bf_buf_addr = bf->bf_dmacontext;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001687
1688 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1689 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1690 bf->bf_isnullfunc = true;
Sujith1b04b932010-01-08 10:36:05 +05301691 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001692 } else
1693 bf->bf_isnullfunc = false;
1694
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001695 bf->bf_tx_aborted = false;
1696
Sujithe8324352009-01-16 21:38:42 +05301697 return 0;
1698}
1699
1700/* FIXME: tx power */
1701static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1702 struct ath_tx_control *txctl)
1703{
Sujitha22be222009-03-30 15:28:36 +05301704 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301705 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithc37452b2009-03-09 09:31:57 +05301706 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301707 struct ath_node *an = NULL;
1708 struct list_head bf_head;
1709 struct ath_desc *ds;
1710 struct ath_atx_tid *tid;
Sujithcbe61d82009-02-09 13:27:12 +05301711 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301712 int frm_type;
Sujithc37452b2009-03-09 09:31:57 +05301713 __le16 fc;
Sujithe8324352009-01-16 21:38:42 +05301714
1715 frm_type = get_hw_packet_type(skb);
Sujithc37452b2009-03-09 09:31:57 +05301716 fc = hdr->frame_control;
Sujithe8324352009-01-16 21:38:42 +05301717
1718 INIT_LIST_HEAD(&bf_head);
1719 list_add_tail(&bf->list, &bf_head);
1720
1721 ds = bf->bf_desc;
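	/* a standalone frame: link pointer 0 terminates the descriptor chain */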
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001722 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301723
1724 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1725 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1726
1727 ath9k_hw_filltxdesc(ah, ds,
1728 skb->len, /* segment length */
1729 true, /* first segment */
1730 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001731 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001732 bf->bf_buf_addr,
1733 txctl->txq->axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301734
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001735 if (bf->bf_state.bfs_paprd)
1736 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1737
Sujithe8324352009-01-16 21:38:42 +05301738 spin_lock_bh(&txctl->txq->axq_lock);
1739
1740 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1741 tx_info->control.sta) {
1742 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1743 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1744
Sujithc37452b2009-03-09 09:31:57 +05301745 if (!ieee80211_is_data_qos(fc)) {
1746 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1747 goto tx_done;
1748 }
1749
Felix Fietkau4fdec032010-03-12 04:02:43 +01001750 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Sujithe8324352009-01-16 21:38:42 +05301751 /*
1752 * Try aggregation if it's a unicast data frame
1753 * and the destination is HT capable.
1754 */
1755 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1756 } else {
1757 /*
1758 * Send this frame as regular when ADDBA
1759 * exchange is neither complete nor pending.
1760 */
Sujithc37452b2009-03-09 09:31:57 +05301761 ath_tx_send_ht_normal(sc, txctl->txq,
1762 tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301763 }
1764 } else {
Sujithc37452b2009-03-09 09:31:57 +05301765 ath_tx_send_normal(sc, txctl->txq, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301766 }
1767
Sujithc37452b2009-03-09 09:31:57 +05301768tx_done:
Sujithe8324352009-01-16 21:38:42 +05301769 spin_unlock_bh(&txctl->txq->axq_lock);
1770}
1771
1772/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001773int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301774 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001775{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001776 struct ath_wiphy *aphy = hw->priv;
1777 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001778 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau84642d62010-06-01 21:33:13 +02001779 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001780 struct ath_buf *bf;
Felix Fietkau97923b12010-06-12 00:33:55 -04001781 int q, r;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001782
Sujithe8324352009-01-16 21:38:42 +05301783 bf = ath_tx_get_buffer(sc);
1784 if (!bf) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001785 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
Sujithe8324352009-01-16 21:38:42 +05301786 return -1;
1787 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001788
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001789 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301790 if (unlikely(r)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001791 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001792
Sujithe8324352009-01-16 21:38:42 +05301793 /* upon ath_tx_processq() this TX queue will be resumed, we
 1794	 * guarantee this will happen by knowing beforehand that
 1795	 * we will at least have to run TX completion on one buffer
1796 * on the queue */
1797 spin_lock_bh(&txq->axq_lock);
Felix Fietkau84642d62010-06-01 21:33:13 +02001798 if (!txq->stopped && txq->axq_depth > 1) {
Luis R. Rodriguezf52de032009-11-02 17:09:12 -08001799 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
Sujithe8324352009-01-16 21:38:42 +05301800 txq->stopped = 1;
1801 }
1802 spin_unlock_bh(&txq->axq_lock);
1803
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001804 ath_tx_return_buffer(sc, bf);
Sujithe8324352009-01-16 21:38:42 +05301805
1806 return r;
1807 }
1808
Felix Fietkau97923b12010-06-12 00:33:55 -04001809 q = skb_get_queue_mapping(skb);
1810 if (q >= 4)
1811 q = 0;
1812
1813 spin_lock_bh(&txq->axq_lock);
1814 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1815 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1816 txq->stopped = 1;
1817 }
1818 spin_unlock_bh(&txq->axq_lock);
1819
Sujithe8324352009-01-16 21:38:42 +05301820 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001821
1822 return 0;
1823}
1824
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001825void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001826{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001827 struct ath_wiphy *aphy = hw->priv;
1828 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001829 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001830 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1831 int padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301832 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1833 struct ath_tx_control txctl;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001834
Sujithe8324352009-01-16 21:38:42 +05301835 memset(&txctl, 0, sizeof(struct ath_tx_control));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001836
Sujithe8324352009-01-16 21:38:42 +05301837 /*
1838 * As a temporary workaround, assign seq# here; this will likely need
1839 * to be cleaned up to work better with Beacon transmission and virtual
1840 * BSSes.
1841 */
1842 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
Sujithe8324352009-01-16 21:38:42 +05301843 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1844 sc->tx.seq_no += 0x10;
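		/* the sequence number lives in the upper 12 bits of seq_ctrl;
		 * adding 0x10 advances it by one while the fragment bits are
		 * preserved by the IEEE80211_SCTL_FRAG mask below */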
1845 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1846 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001847 }
1848
Sujithe8324352009-01-16 21:38:42 +05301849 /* Add the padding after the header if this is not already done */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001850 padpos = ath9k_cmn_padpos(hdr->frame_control);
1851 padsize = padpos & 3;
 1852	if (padsize && skb->len > padpos) {
Sujithe8324352009-01-16 21:38:42 +05301853 if (skb_headroom(skb) < padsize) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001854 ath_print(common, ATH_DBG_XMIT,
1855 "TX CABQ padding failed\n");
Sujithe8324352009-01-16 21:38:42 +05301856 dev_kfree_skb_any(skb);
1857 return;
1858 }
1859 skb_push(skb, padsize);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001860 memmove(skb->data, skb->data + padsize, padpos);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001861 }
1862
Sujithe8324352009-01-16 21:38:42 +05301863 txctl.txq = sc->beacon.cabq;
1864
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001865 ath_print(common, ATH_DBG_XMIT,
1866 "transmitting CABQ packet, skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301867
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001868 if (ath_tx_start(hw, skb, &txctl) != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001869 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
Sujithe8324352009-01-16 21:38:42 +05301870 goto exit;
1871 }
1872
1873 return;
1874exit:
1875 dev_kfree_skb_any(skb);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001876}
1877
Sujithe8324352009-01-16 21:38:42 +05301878/*****************/
1879/* TX Completion */
1880/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001881
Sujithe8324352009-01-16 21:38:42 +05301882static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau827e69b2009-11-15 23:09:25 +01001883 struct ath_wiphy *aphy, int tx_flags)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001884{
Sujithe8324352009-01-16 21:38:42 +05301885 struct ieee80211_hw *hw = sc->hw;
1886 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001887 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001888 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001889 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301890
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001891 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301892
Felix Fietkau827e69b2009-11-15 23:09:25 +01001893 if (aphy)
1894 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301895
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301896 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301897 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301898
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301899 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301900 /* Frame was ACKed */
1901 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1902 }
1903
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001904 padpos = ath9k_cmn_padpos(hdr->frame_control);
1905 padsize = padpos & 3;
 1906	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301907 /*
1908 * Remove MAC header padding before giving the frame back to
1909 * mac80211.
1910 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001911 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301912 skb_pull(skb, padsize);
1913 }
1914
Sujith1b04b932010-01-08 10:36:05 +05301915 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1916 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001917 ath_print(common, ATH_DBG_PS,
1918 "Going back to sleep after having "
Pavel Roskinf643e512010-01-29 17:22:12 -05001919 "received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301920 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1921 PS_WAIT_FOR_CAB |
1922 PS_WAIT_FOR_PSPOLL_DATA |
1923 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001924 }
1925
Felix Fietkau827e69b2009-11-15 23:09:25 +01001926 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
Jouni Malinenf0ed85c2009-03-03 19:23:31 +02001927 ath9k_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001928 else {
1929 q = skb_get_queue_mapping(skb);
1930 if (q >= 4)
1931 q = 0;
1932
1933 if (--sc->tx.pending_frames[q] < 0)
1934 sc->tx.pending_frames[q] = 0;
1935
Felix Fietkau827e69b2009-11-15 23:09:25 +01001936 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001937 }
Sujithe8324352009-01-16 21:38:42 +05301938}
1939
1940static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001941 struct ath_txq *txq, struct list_head *bf_q,
1942 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301943{
1944 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301945 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301946 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301947
Sujithe8324352009-01-16 21:38:42 +05301948 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301949 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301950
1951 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301952 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301953
1954 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301955 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301956 }
1957
1958 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001959
1960 if (bf->bf_state.bfs_paprd) {
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001961 if (time_after(jiffies,
1962 bf->bf_state.bfs_paprd_timestamp +
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001963 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001964 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001965 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001966 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001967 } else {
1968 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1969 ath_debug_stat_tx(sc, txq, bf, ts);
1970 }
Sujithe8324352009-01-16 21:38:42 +05301971
1972 /*
 1973	 * Return this MPDU's list of ath_bufs to the free queue
1974 */
1975 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1976 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1977 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1978}
1979
1980static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001981 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301982{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001983 u16 seq_st = 0;
1984 u32 ba[WME_BA_BMP_SIZE >> 5];
Sujithe8324352009-01-16 21:38:42 +05301985 int ba_index;
1986 int nbad = 0;
1987 int isaggr = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001988
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001989 if (bf->bf_lastbf->bf_tx_aborted)
Sujithe8324352009-01-16 21:38:42 +05301990 return 0;
Sujith528f0c62008-10-29 10:14:26 +05301991
Sujithcd3d39a2008-08-11 14:03:34 +05301992 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001993 if (isaggr) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001994 seq_st = ts->ts_seqnum;
1995 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001996 }
1997
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001998 while (bf) {
Sujithe8324352009-01-16 21:38:42 +05301999 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
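		/* a subframe is counted as bad if the whole burst failed or
		 * its bit is not set in the block-ack bitmap */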
2000 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
2001 nbad++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002002
Sujithe8324352009-01-16 21:38:42 +05302003 bf = bf->bf_next;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002004 }
2005
Sujithe8324352009-01-16 21:38:42 +05302006 return nbad;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002007}
2008
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002009static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302010 int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302011{
Sujitha22be222009-03-30 15:28:36 +05302012 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302013 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302014 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01002015 struct ieee80211_hw *hw = bf->aphy->hw;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302016 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302017
Sujith95e4acb2009-03-13 08:56:09 +05302018 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002019 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302020
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002021 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302022 WARN_ON(tx_rateindex >= hw->max_rates);
2023
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002024 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302025 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Felix Fietkaud9698472010-03-01 13:32:11 +01002026 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
2027 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302028
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002029 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302030 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Sujith254ad0f2009-02-04 08:10:19 +05302031 if (ieee80211_is_data(hdr->frame_control)) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002032 if (ts->ts_flags &
Felix Fietkau827e69b2009-11-15 23:09:25 +01002033 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2034 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002035 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2036 (ts->ts_status & ATH9K_TXERR_FIFO))
Felix Fietkau827e69b2009-11-15 23:09:25 +01002037 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2038 tx_info->status.ampdu_len = bf->bf_nframes;
2039 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
Sujithc4288392008-11-18 09:09:30 +05302040 }
2041 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302042
Felix Fietkau545750d2009-11-23 22:21:01 +01002043 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302044 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002045 tx_info->status.rates[i].idx = -1;
2046 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302047
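	/* ts_longretry is the retransmission count for the final rate,
	 * so the total number of attempts at that rate is one more */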
Felix Fietkau78c46532010-06-25 01:26:16 +02002048 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302049}
2050
Sujith059d8062009-01-16 21:38:49 +05302051static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2052{
2053 int qnum;
2054
Felix Fietkau97923b12010-06-12 00:33:55 -04002055 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2056 if (qnum == -1)
2057 return;
2058
Sujith059d8062009-01-16 21:38:49 +05302059 spin_lock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002060 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
2061 ath_mac80211_start_queue(sc, qnum);
2062 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05302063 }
2064 spin_unlock_bh(&txq->axq_lock);
2065}
2066
Sujithc4288392008-11-18 09:09:30 +05302067static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002068{
Sujithcbe61d82009-02-09 13:27:12 +05302069 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002070 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002071 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2072 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302073 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002074 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302075 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002076 int status;
2077
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002078 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2079 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2080 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002081
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002082 for (;;) {
2083 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002084 if (list_empty(&txq->axq_q)) {
2085 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002086 spin_unlock_bh(&txq->axq_lock);
2087 break;
2088 }
2089 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2090
2091 /*
2092 * There is a race condition that a BH gets scheduled
 2093		 * after sw writes TxE and before hw re-loads the last
2094 * descriptor to get the newly chained one.
2095 * Software must keep the last DONE descriptor as a
2096 * holding descriptor - software does so by marking
2097 * it with the STALE flag.
2098 */
2099 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302100 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002101 bf_held = bf;
2102 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302103 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002104 break;
2105 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002106 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302107 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002108 }
2109 }
2110
2111 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302112 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002113
Felix Fietkau29bffa92010-03-29 20:14:23 -07002114 memset(&ts, 0, sizeof(ts));
2115 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002116 if (status == -EINPROGRESS) {
2117 spin_unlock_bh(&txq->axq_lock);
2118 break;
2119 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002120
2121 /*
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002122 * We now know the nullfunc frame has been ACKed so we
2123 * can disable RX.
2124 */
2125 if (bf->bf_isnullfunc &&
Felix Fietkau29bffa92010-03-29 20:14:23 -07002126 (ts.ts_status & ATH9K_TX_ACKED)) {
Senthil Balasubramanian3f7c5c12010-02-03 22:51:13 +05302127 if ((sc->ps_flags & PS_ENABLED))
2128 ath9k_enable_ps(sc);
2129 else
Sujith1b04b932010-01-08 10:36:05 +05302130 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002131 }
2132
2133 /*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002134 * Remove ath_buf's of the same transmit unit from txq,
2135 * however leave the last descriptor back as the holding
2136 * descriptor for hw.
2137 */
Sujitha119cc42009-03-30 15:28:38 +05302138 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002139 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002140 if (!list_is_singular(&lastbf->list))
2141 list_cut_position(&bf_head,
2142 &txq->axq_q, lastbf->list.prev);
2143
2144 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002145 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002146 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002147 if (bf_held)
2148 list_del(&bf_held->list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002149 spin_unlock_bh(&txq->axq_lock);
2150
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002151 if (bf_held)
2152 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002153
Sujithcd3d39a2008-08-11 14:03:34 +05302154 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002155 /*
2156 * This frame is sent out as a single frame.
2157 * Use hardware retry status for this frame.
2158 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002159 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302160 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002161 ath_tx_rc_status(bf, &ts, 0, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002162 }
Johannes Berge6a98542008-10-21 12:40:02 +02002163
Sujithcd3d39a2008-08-11 14:03:34 +05302164 if (bf_isampdu(bf))
Felix Fietkau29bffa92010-03-29 20:14:23 -07002165 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002166 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002167 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002168
Sujith059d8062009-01-16 21:38:49 +05302169 ath_wake_mac80211_queue(sc, txq);
2170
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002171 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302172 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002173 ath_txq_schedule(sc, txq);
2174 spin_unlock_bh(&txq->axq_lock);
2175 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002176}
2177
Sujith305fe472009-07-23 15:32:29 +05302178static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002179{
2180 struct ath_softc *sc = container_of(work, struct ath_softc,
2181 tx_complete_work.work);
2182 struct ath_txq *txq;
2183 int i;
2184 bool needreset = false;
2185
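	/*
	 * axq_tx_inprogress is set below and cleared whenever a TX
	 * completion is processed; a queue that still has frames pending
	 * with the flag set from the previous poll interval is treated
	 * as hung and triggers a chip reset.
	 */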
2186 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2187 if (ATH_TXQ_SETUP(sc, i)) {
2188 txq = &sc->tx.txq[i];
2189 spin_lock_bh(&txq->axq_lock);
2190 if (txq->axq_depth) {
2191 if (txq->axq_tx_inprogress) {
2192 needreset = true;
2193 spin_unlock_bh(&txq->axq_lock);
2194 break;
2195 } else {
2196 txq->axq_tx_inprogress = true;
2197 }
2198 }
2199 spin_unlock_bh(&txq->axq_lock);
2200 }
2201
2202 if (needreset) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002203 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2204 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302205 ath9k_ps_wakeup(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002206 ath_reset(sc, false);
Sujith332c5562009-10-09 09:51:28 +05302207 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002208 }
2209
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002210 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002211 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2212}
2213
2214
Sujithe8324352009-01-16 21:38:42 +05302215
2216void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002217{
Sujithe8324352009-01-16 21:38:42 +05302218 int i;
2219 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002220
Sujithe8324352009-01-16 21:38:42 +05302221 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002222
2223 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302224 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2225 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002226 }
2227}
2228
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002229void ath_tx_edma_tasklet(struct ath_softc *sc)
2230{
2231 struct ath_tx_status txs;
2232 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2233 struct ath_hw *ah = sc->sc_ah;
2234 struct ath_txq *txq;
2235 struct ath_buf *bf, *lastbf;
2236 struct list_head bf_head;
2237 int status;
2238 int txok;
2239
2240 for (;;) {
2241 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2242 if (status == -EINPROGRESS)
2243 break;
2244 if (status == -EIO) {
2245 ath_print(common, ATH_DBG_XMIT,
2246 "Error processing tx status\n");
2247 break;
2248 }
2249
2250 /* Skip beacon completions */
2251 if (txs.qid == sc->beacon.beaconq)
2252 continue;
2253
2254 txq = &sc->tx.txq[txs.qid];
2255
2256 spin_lock_bh(&txq->axq_lock);
2257 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2258 spin_unlock_bh(&txq->axq_lock);
2259 return;
2260 }
2261
2262 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2263 struct ath_buf, list);
2264 lastbf = bf->bf_lastbf;
2265
2266 INIT_LIST_HEAD(&bf_head);
2267 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2268 &lastbf->list);
2269 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2270 txq->axq_depth--;
2271 txq->axq_tx_inprogress = false;
2272 spin_unlock_bh(&txq->axq_lock);
2273
2274 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2275
Vasanthakumar Thiagarajande0f6482010-05-17 18:57:54 -07002276 /*
2277 * Make sure null func frame is acked before configuring
2278 * hw into ps mode.
2279 */
2280 if (bf->bf_isnullfunc && txok) {
2281 if ((sc->ps_flags & PS_ENABLED))
2282 ath9k_enable_ps(sc);
2283 else
2284 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2285 }
2286
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002287 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002288 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2289 bf->bf_state.bf_type |= BUF_XRETRY;
2290 ath_tx_rc_status(bf, &txs, 0, txok, true);
2291 }
2292
2293 if (bf_isampdu(bf))
2294 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2295 else
2296 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2297 &txs, txok, 0);
2298
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002299 ath_wake_mac80211_queue(sc, txq);
2300
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002301 spin_lock_bh(&txq->axq_lock);
2302 if (!list_empty(&txq->txq_fifo_pending)) {
2303 INIT_LIST_HEAD(&bf_head);
2304 bf = list_first_entry(&txq->txq_fifo_pending,
2305 struct ath_buf, list);
2306 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2307 &bf->bf_lastbf->list);
2308 ath_tx_txqaddbuf(sc, txq, &bf_head);
2309 } else if (sc->sc_flags & SC_OP_TXAGGR)
2310 ath_txq_schedule(sc, txq);
2311 spin_unlock_bh(&txq->axq_lock);
2312 }
2313}
2314
Sujithe8324352009-01-16 21:38:42 +05302315/*****************/
2316/* Init, Cleanup */
2317/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002318
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002319static int ath_txstatus_setup(struct ath_softc *sc, int size)
2320{
2321 struct ath_descdma *dd = &sc->txsdma;
2322 u8 txs_len = sc->sc_ah->caps.txs_len;
2323
2324 dd->dd_desc_len = size * txs_len;
2325 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2326 &dd->dd_desc_paddr, GFP_KERNEL);
2327 if (!dd->dd_desc)
2328 return -ENOMEM;
2329
2330 return 0;
2331}
2332
2333static int ath_tx_edma_init(struct ath_softc *sc)
2334{
2335 int err;
2336
2337 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2338 if (!err)
2339 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2340 sc->txsdma.dd_desc_paddr,
2341 ATH_TXSTATUS_RING_SIZE);
2342
2343 return err;
2344}
2345
2346static void ath_tx_edma_cleanup(struct ath_softc *sc)
2347{
2348 struct ath_descdma *dd = &sc->txsdma;
2349
2350 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2351 dd->dd_desc_paddr);
2352}
2353
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002354int ath_tx_init(struct ath_softc *sc, int nbufs)
2355{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002356 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002357 int error = 0;
2358
Sujith797fe5c2009-03-30 15:28:45 +05302359 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002360
Sujith797fe5c2009-03-30 15:28:45 +05302361 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002362 "tx", nbufs, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302363 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002364 ath_print(common, ATH_DBG_FATAL,
2365 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302366 goto err;
2367 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002368
Sujith797fe5c2009-03-30 15:28:45 +05302369 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002370 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302371 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002372 ath_print(common, ATH_DBG_FATAL,
2373 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302374 goto err;
2375 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002377 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2378
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002379 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2380 error = ath_tx_edma_init(sc);
2381 if (error)
2382 goto err;
2383 }
2384
Sujith797fe5c2009-03-30 15:28:45 +05302385err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002386 if (error != 0)
2387 ath_tx_cleanup(sc);
2388
2389 return error;
2390}
2391
Sujith797fe5c2009-03-30 15:28:45 +05302392void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002393{
Sujithb77f4832008-12-07 21:44:03 +05302394 if (sc->beacon.bdma.dd_desc_len != 0)
2395 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002396
Sujithb77f4832008-12-07 21:44:03 +05302397 if (sc->tx.txdma.dd_desc_len != 0)
2398 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002399
2400 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2401 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002402}
2403
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002404void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2405{
Sujithc5170162008-10-29 10:13:59 +05302406 struct ath_atx_tid *tid;
2407 struct ath_atx_ac *ac;
2408 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002409
Sujith8ee5afb2008-12-07 21:43:36 +05302410 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302411 tidno < WME_NUM_TID;
2412 tidno++, tid++) {
2413 tid->an = an;
2414 tid->tidno = tidno;
2415 tid->seq_start = tid->seq_next = 0;
2416 tid->baw_size = WME_MAX_BA;
2417 tid->baw_head = tid->baw_tail = 0;
2418 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302419 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302420 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302421 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302422 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302423 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302424 tid->state &= ~AGGR_ADDBA_COMPLETE;
2425 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302426 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002427
Sujith8ee5afb2008-12-07 21:43:36 +05302428 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302429 acno < WME_NUM_AC; acno++, ac++) {
2430 ac->sched = false;
Felix Fietkau1d2231e2010-06-12 00:33:51 -04002431 ac->qnum = sc->tx.hwq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302432 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002433 }
2434}
2435
Sujithb5aa9bf2008-10-29 10:13:31 +05302436void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002437{
Felix Fietkau2b409942010-07-07 19:42:08 +02002438 struct ath_atx_ac *ac;
2439 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002440 struct ath_txq *txq;
Felix Fietkau2b409942010-07-07 19:42:08 +02002441 int i, tidno;
Sujithe8324352009-01-16 21:38:42 +05302442
Felix Fietkau2b409942010-07-07 19:42:08 +02002443 for (tidno = 0, tid = &an->tid[tidno];
2444 tidno < WME_NUM_TID; tidno++, tid++) {
2445 i = tid->ac->qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002446
Felix Fietkau2b409942010-07-07 19:42:08 +02002447 if (!ATH_TXQ_SETUP(sc, i))
2448 continue;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002449
Felix Fietkau2b409942010-07-07 19:42:08 +02002450 txq = &sc->tx.txq[i];
2451 ac = tid->ac;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002452
Felix Fietkau2b409942010-07-07 19:42:08 +02002453 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002454
Felix Fietkau2b409942010-07-07 19:42:08 +02002455 if (tid->sched) {
2456 list_del(&tid->list);
2457 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002458 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002459
2460 if (ac->sched) {
2461 list_del(&ac->list);
2462 tid->ac->sched = false;
2463 }
2464
2465 ath_tid_drain(sc, txq, tid);
2466 tid->state &= ~AGGR_ADDBA_COMPLETE;
2467 tid->state &= ~AGGR_CLEANUP;
2468
2469 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002470 }
2471}