/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_MCS(_rc)	((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
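/*
 * Illustrative note (not in the original source): with the full 800 ns GI a
 * symbol lasts 4 us, with the short GI 3.6 us. For example, a 4000 us span
 * holds NUM_SYMBOLS_PER_USEC(4000) = 1000 symbols, while
 * NUM_SYMBOLS_PER_USEC_HALFGI(4000) = (4000 * 5 - 4) / 18 = 1110 symbols.
 */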

#define OFDM_SIFS_TIME		16

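/*
 * Illustrative note (not in the original source): each entry below is the
 * number of data bits carried by one 4 us OFDM symbol for a single spatial
 * stream, so e.g. bits_per_symbol[7][0] = 260 corresponds to 260 bits / 4 us
 * = 65 Mbit/s, the long-GI data rate of single-stream MCS 7 at 20 MHz.
 */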
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{  26,  54 },	/* 0: BPSK */
	{  52, 108 },	/* 1: QPSK 1/2 */
	{  78, 162 },	/* 2: QPSK 3/4 */
	{ 104, 216 },	/* 3: 16-QAM 1/2 */
	{ 156, 324 },	/* 4: 16-QAM 3/4 */
	{ 208, 432 },	/* 5: 64-QAM 2/3 */
	{ 234, 486 },	/* 6: 64-QAM 3/4 */
	{ 260, 540 },	/* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

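/*
 * Illustrative note (not in the original source): these limits appear to be
 * roughly 4 ms of airtime at each MCS data rate (e.g. MCS 0, HT20, long GI is
 * 6.5 Mbit/s, and 6.5e6 * 4e-3 / 8 = 3250 bytes, close to the 3212 entry),
 * capped at 65532, presumably because the hardware length fields are 16 bits.
 */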
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);
	tid->paused++;
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		BUG_ON(bf_isretried(bf));
		list_move_tail(&bf->list, &bf_head);
		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	BUG_ON(tid->tx_buf[cindex] != NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	unsigned long flags;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();

		spin_lock_irqsave(&sc->tx.txbuflock, flags);
		list_splice_tail_init(bf_q, &sc->tx.txbuf);
		spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if it is a probe rate, avoid aggregation of
	 * this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
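	/*
	 * Illustrative example (not in the original source): for a 4 us MPDU
	 * density at single-stream MCS 7, 40 MHz, full GI, nsymbols = 1 and
	 * nsymbits = 540, so minlen = 540 / 8 = 67 bytes; shorter subframes
	 * get extra delimiters below to satisfy the density requirement.
	 */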

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
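/*
 * Illustrative note (not in the original source): PADBYTES() gives the bytes
 * needed to round a subframe up to a 4-byte boundary, e.g. PADBYTES(1538) = 2;
 * together with ndelim * 4 bytes of delimiters it pads each subframe so the
 * next MPDU in the aggregate starts 4-byte aligned.
 */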
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		       u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txtid->state |= AGGR_ADDBA_PROGRESS;
	ath_tx_pause_tid(sc, txtid);
	*ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_tx_status ts;
	struct ath_buf *bf;
	struct list_head bf_head;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: this is based on the assumption that a
			 * software retried frame will always stay at
			 * the head of the software queue.
			 */
			break;
		}
		list_move_tail(&bf->list, &bf_head);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}
	spin_unlock_bh(&txq->axq_lock);

	if (txtid->baw_head != txtid->baw_tail) {
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		ath_tx_flush_tid(sc, txtid);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by the caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001466static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
Sujith528f0c62008-10-29 10:14:26 +05301467{
1468 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1469 int flags = 0;
1470
1471 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1472 flags |= ATH9K_TXDESC_INTREQ;
1473
1474 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1475 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301476
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001477 if (use_ldpc)
1478 flags |= ATH9K_TXDESC_LDPC;
1479
Sujith528f0c62008-10-29 10:14:26 +05301480 return flags;
1481}
1482
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001483/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001484 * rix - rate index
1485 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1486 * width - 0 for 20 MHz, 1 for 40 MHz
1487 * half_gi - 1 to use the 3.6 us (short GI) symbol time instead of 4 us
1488 */
Sujith102e0572008-10-29 10:15:16 +05301489static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1490 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001491{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001492 u32 nbits, nsymbits, duration, nsymbols;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001493 int streams, pktlen;
1494
Sujithcd3d39a2008-08-11 14:03:34 +05301495 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +05301496
1497 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001498 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001499 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001500 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001501 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1502
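/*
 * Each symbol lasts 4 us with the regular GI and 3.6 us with the short
 * (half) GI. Example, using bits_per_symbol[] above: a 1500 byte MPDU at
 * MCS 7, 20 MHz needs ceil((1500 * 8 + 22) / 260) = 47 symbols, i.e.
 * 188 us of data plus the 36 us of training/signal fields added below.
 */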
1503 if (!half_gi)
1504 duration = SYMBOL_TIME(nsymbols);
1505 else
1506 duration = SYMBOL_TIME_HALFGI(nsymbols);
1507
Sujithe63835b2008-11-18 09:07:53 +05301508 /* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001509 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301510
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001511 return duration;
1512}
1513
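/*
 * Build the (up to) four-entry hardware rate series from the mac80211
 * rate table: RTS/CTS policy, HT flags and the estimated airtime of
 * each attempt.
 */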
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001514static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1515{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001516 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001517 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301518 struct sk_buff *skb;
1519 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301520 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001521 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301522 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301523 int i, flags = 0;
1524 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301525 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301526
1527 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301528
Sujitha22be222009-03-30 15:28:36 +05301529 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301530 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301531 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301532 hdr = (struct ieee80211_hdr *)skb->data;
1533 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301534
Sujithc89424d2009-01-30 14:29:28 +05301535 /*
1536 * We check if Short Preamble is needed for the CTS rate by
1537 * checking the BSS's global flag.
1538 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1539 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001540 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1541 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301542 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001543 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001544
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001545 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001546 bool is_40, is_sgi, is_sp;
1547 int phy;
1548
Sujithe63835b2008-11-18 09:07:53 +05301549 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001550 continue;
1551
Sujitha8efee42008-11-18 09:07:30 +05301552 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301553 series[i].Tries = rates[i].count;
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001554 series[i].ChSel = common->tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001555
Felix Fietkau27032052010-01-17 21:08:50 +01001556 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1557 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301558 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001559 flags |= ATH9K_TXDESC_RTSENA;
1560 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1561 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1562 flags |= ATH9K_TXDESC_CTSENA;
1563 }
1564
Sujithc89424d2009-01-30 14:29:28 +05301565 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1566 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1567 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1568 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001569
Felix Fietkau545750d2009-11-23 22:21:01 +01001570 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1571 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1572 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1573
1574 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1575 /* MCS rates */
1576 series[i].Rate = rix | 0x80;
1577 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1578 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001579 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1580 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001581 continue;
1582 }
1583
 1584 /* legacy rates */
 1585 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
 1586 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
 1587     !(rate->flags & IEEE80211_RATE_ERP_G))
 1588 phy = WLAN_RC_PHY_CCK;
 1589 else
 1590 phy = WLAN_RC_PHY_OFDM;
 1591 
 1592 series[i].Rate = rate->hw_value;
1593 if (rate->hw_value_short) {
1594 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1595 series[i].Rate |= rate->hw_value_short;
1596 } else {
1597 is_sp = false;
1598 }
1599
1600 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1601 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001602 }
1603
Felix Fietkau27032052010-01-17 21:08:50 +01001604 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1605 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1606 flags &= ~ATH9K_TXDESC_RTSENA;
1607
1608 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1609 if (flags & ATH9K_TXDESC_RTSENA)
1610 flags &= ~ATH9K_TXDESC_CTSENA;
1611
Sujithe63835b2008-11-18 09:07:53 +05301612 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301613 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1614 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301615 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301616 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301617
Sujith17d79042009-02-09 13:27:03 +05301618 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301619 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001620}
1621
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001622static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
Sujithe8324352009-01-16 21:38:42 +05301623 struct sk_buff *skb,
1624 struct ath_tx_control *txctl)
1625{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001626 struct ath_wiphy *aphy = hw->priv;
1627 struct ath_softc *sc = aphy->sc;
Sujithe8324352009-01-16 21:38:42 +05301628 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1629 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301630 int hdrlen;
1631 __le16 fc;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001632 int padpos, padsize;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001633 bool use_ldpc = false;
Sujithe8324352009-01-16 21:38:42 +05301634
Felix Fietkau827e69b2009-11-15 23:09:25 +01001635 tx_info->pad[0] = 0;
1636 switch (txctl->frame_type) {
Pavel Roskinc81494d2010-03-31 18:05:25 -04001637 case ATH9K_IFT_NOT_INTERNAL:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001638 break;
Pavel Roskinc81494d2010-03-31 18:05:25 -04001639 case ATH9K_IFT_PAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001640 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1641 /* fall through */
Pavel Roskinc81494d2010-03-31 18:05:25 -04001642 case ATH9K_IFT_UNPAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001643 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1644 break;
1645 }
Sujithe8324352009-01-16 21:38:42 +05301646 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1647 fc = hdr->frame_control;
1648
1649 ATH_TXBUF_RESET(bf);
1650
Felix Fietkau827e69b2009-11-15 23:09:25 +01001651 bf->aphy = aphy;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001652 bf->bf_frmlen = skb->len + FCS_LEN;
1653 /* Remove the padding size from bf_frmlen, if any */
1654 padpos = ath9k_cmn_padpos(hdr->frame_control);
1655 padsize = padpos & 3;
 1656 if (padsize && skb->len > padpos + padsize) {
1657 bf->bf_frmlen -= padsize;
1658 }
Sujithe8324352009-01-16 21:38:42 +05301659
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001660 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
Sujithc656bbb2009-01-16 21:38:56 +05301661 bf->bf_state.bf_type |= BUF_HT;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001662 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1663 use_ldpc = true;
1664 }
Sujithe8324352009-01-16 21:38:42 +05301665
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001666 bf->bf_state.bfs_paprd = txctl->paprd;
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001667 if (txctl->paprd)
1668 bf->bf_state.bfs_paprd_timestamp = jiffies;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001669 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
Sujithe8324352009-01-16 21:38:42 +05301670
1671 bf->bf_keytype = get_hw_crypto_keytype(skb);
Sujithe8324352009-01-16 21:38:42 +05301672 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1673 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1674 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1675 } else {
1676 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1677 }
1678
Sujith17b182e2009-12-14 14:56:56 +05301679 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1680 (sc->sc_flags & SC_OP_TXAGGR))
Sujithe8324352009-01-16 21:38:42 +05301681 assign_aggr_tid_seqno(skb, bf);
1682
1683 bf->bf_mpdu = skb;
1684
1685 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
1686 skb->len, DMA_TO_DEVICE);
1687 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
1688 bf->bf_mpdu = NULL;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001689 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1690 "dma_mapping_error() on TX\n");
Sujithe8324352009-01-16 21:38:42 +05301691 return -ENOMEM;
1692 }
1693
1694 bf->bf_buf_addr = bf->bf_dmacontext;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001695
1696 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1697 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1698 bf->bf_isnullfunc = true;
Sujith1b04b932010-01-08 10:36:05 +05301699 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001700 } else
1701 bf->bf_isnullfunc = false;
1702
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001703 bf->bf_tx_aborted = false;
1704
Sujithe8324352009-01-16 21:38:42 +05301705 return 0;
1706}
1707
1708/* FIXME: tx power */
1709static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1710 struct ath_tx_control *txctl)
1711{
Sujitha22be222009-03-30 15:28:36 +05301712 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301713 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithc37452b2009-03-09 09:31:57 +05301714 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301715 struct ath_node *an = NULL;
1716 struct list_head bf_head;
1717 struct ath_desc *ds;
1718 struct ath_atx_tid *tid;
Sujithcbe61d82009-02-09 13:27:12 +05301719 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301720 int frm_type;
Sujithc37452b2009-03-09 09:31:57 +05301721 __le16 fc;
Sujithe8324352009-01-16 21:38:42 +05301722
1723 frm_type = get_hw_packet_type(skb);
Sujithc37452b2009-03-09 09:31:57 +05301724 fc = hdr->frame_control;
Sujithe8324352009-01-16 21:38:42 +05301725
1726 INIT_LIST_HEAD(&bf_head);
1727 list_add_tail(&bf->list, &bf_head);
1728
1729 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001730 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301731
1732 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1733 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1734
1735 ath9k_hw_filltxdesc(ah, ds,
1736 skb->len, /* segment length */
1737 true, /* first segment */
1738 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001739 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001740 bf->bf_buf_addr,
1741 txctl->txq->axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301742
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001743 if (bf->bf_state.bfs_paprd)
1744 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1745
Sujithe8324352009-01-16 21:38:42 +05301746 spin_lock_bh(&txctl->txq->axq_lock);
1747
1748 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1749 tx_info->control.sta) {
1750 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1751 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1752
Sujithc37452b2009-03-09 09:31:57 +05301753 if (!ieee80211_is_data_qos(fc)) {
1754 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1755 goto tx_done;
1756 }
1757
Felix Fietkau4fdec032010-03-12 04:02:43 +01001758 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Sujithe8324352009-01-16 21:38:42 +05301759 /*
1760 * Try aggregation if it's a unicast data frame
1761 * and the destination is HT capable.
1762 */
1763 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1764 } else {
1765 /*
1766 * Send this frame as regular when ADDBA
1767 * exchange is neither complete nor pending.
1768 */
Sujithc37452b2009-03-09 09:31:57 +05301769 ath_tx_send_ht_normal(sc, txctl->txq,
1770 tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301771 }
1772 } else {
Sujithc37452b2009-03-09 09:31:57 +05301773 ath_tx_send_normal(sc, txctl->txq, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301774 }
1775
Sujithc37452b2009-03-09 09:31:57 +05301776tx_done:
Sujithe8324352009-01-16 21:38:42 +05301777 spin_unlock_bh(&txctl->txq->axq_lock);
1778}
1779
1780/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001781int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301782 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001783{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001784 struct ath_wiphy *aphy = hw->priv;
1785 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001786 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau84642d62010-06-01 21:33:13 +02001787 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001788 struct ath_buf *bf;
Felix Fietkau97923b12010-06-12 00:33:55 -04001789 int q, r;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001790
Sujithe8324352009-01-16 21:38:42 +05301791 bf = ath_tx_get_buffer(sc);
1792 if (!bf) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001793 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
Sujithe8324352009-01-16 21:38:42 +05301794 return -1;
1795 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001796
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001797 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301798 if (unlikely(r)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001799 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001800
Sujithe8324352009-01-16 21:38:42 +05301801 /* upon ath_tx_processq() this TX queue will be resumed, we
1802 * guarantee this will happen by knowing beforehand that
 1803 * we will at least have to run TX completion on one buffer
1804 * on the queue */
1805 spin_lock_bh(&txq->axq_lock);
Felix Fietkau84642d62010-06-01 21:33:13 +02001806 if (!txq->stopped && txq->axq_depth > 1) {
Luis R. Rodriguezf52de032009-11-02 17:09:12 -08001807 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
Sujithe8324352009-01-16 21:38:42 +05301808 txq->stopped = 1;
1809 }
1810 spin_unlock_bh(&txq->axq_lock);
1811
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001812 ath_tx_return_buffer(sc, bf);
Sujithe8324352009-01-16 21:38:42 +05301813
1814 return r;
1815 }
1816
Felix Fietkau97923b12010-06-12 00:33:55 -04001817 q = skb_get_queue_mapping(skb);
1818 if (q >= 4)
1819 q = 0;
1820
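/*
 * Throttle: once too many frames are pending on this hardware queue,
 * stop the matching mac80211 queue; it is woken again from
 * ath_wake_mac80211_queue() as completions drain.
 */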
1821 spin_lock_bh(&txq->axq_lock);
1822 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1823 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1824 txq->stopped = 1;
1825 }
1826 spin_unlock_bh(&txq->axq_lock);
1827
Sujithe8324352009-01-16 21:38:42 +05301828 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001829
1830 return 0;
1831}
1832
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001833void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001834{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001835 struct ath_wiphy *aphy = hw->priv;
1836 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001837 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001838 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1839 int padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301840 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1841 struct ath_tx_control txctl;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001842
Sujithe8324352009-01-16 21:38:42 +05301843 memset(&txctl, 0, sizeof(struct ath_tx_control));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001844
Sujithe8324352009-01-16 21:38:42 +05301845 /*
1846 * As a temporary workaround, assign seq# here; this will likely need
1847 * to be cleaned up to work better with Beacon transmission and virtual
1848 * BSSes.
1849 */
1850 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
Sujithe8324352009-01-16 21:38:42 +05301851 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1852 sc->tx.seq_no += 0x10;
1853 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1854 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001855 }
1856
Sujithe8324352009-01-16 21:38:42 +05301857 /* Add the padding after the header if this is not already done */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001858 padpos = ath9k_cmn_padpos(hdr->frame_control);
1859 padsize = padpos & 3;
 1860 if (padsize && skb->len > padpos) {
Sujithe8324352009-01-16 21:38:42 +05301861 if (skb_headroom(skb) < padsize) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001862 ath_print(common, ATH_DBG_XMIT,
1863 "TX CABQ padding failed\n");
Sujithe8324352009-01-16 21:38:42 +05301864 dev_kfree_skb_any(skb);
1865 return;
1866 }
1867 skb_push(skb, padsize);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001868 memmove(skb->data, skb->data + padsize, padpos);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001869 }
1870
Sujithe8324352009-01-16 21:38:42 +05301871 txctl.txq = sc->beacon.cabq;
1872
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001873 ath_print(common, ATH_DBG_XMIT,
1874 "transmitting CABQ packet, skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301875
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001876 if (ath_tx_start(hw, skb, &txctl) != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001877 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
Sujithe8324352009-01-16 21:38:42 +05301878 goto exit;
1879 }
1880
1881 return;
1882exit:
1883 dev_kfree_skb_any(skb);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001884}
1885
Sujithe8324352009-01-16 21:38:42 +05301886/*****************/
1887/* TX Completion */
1888/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001889
Sujithe8324352009-01-16 21:38:42 +05301890static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau827e69b2009-11-15 23:09:25 +01001891 struct ath_wiphy *aphy, int tx_flags)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001892{
Sujithe8324352009-01-16 21:38:42 +05301893 struct ieee80211_hw *hw = sc->hw;
1894 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001895 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001896 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001897 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301898
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001899 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301900
Felix Fietkau827e69b2009-11-15 23:09:25 +01001901 if (aphy)
1902 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301903
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301904 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301905 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301906
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301907 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301908 /* Frame was ACKed */
1909 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1910 }
1911
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001912 padpos = ath9k_cmn_padpos(hdr->frame_control);
1913 padsize = padpos & 3;
 1914 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301915 /*
1916 * Remove MAC header padding before giving the frame back to
1917 * mac80211.
1918 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001919 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301920 skb_pull(skb, padsize);
1921 }
1922
Sujith1b04b932010-01-08 10:36:05 +05301923 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1924 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001925 ath_print(common, ATH_DBG_PS,
1926 "Going back to sleep after having "
Pavel Roskinf643e512010-01-29 17:22:12 -05001927 "received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301928 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1929 PS_WAIT_FOR_CAB |
1930 PS_WAIT_FOR_PSPOLL_DATA |
1931 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001932 }
1933
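/*
 * Driver-internal (pause/unpause) frames are completed inside ath9k;
 * everything else is handed back to mac80211 and the per-queue pending
 * frame count is dropped.
 */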
Felix Fietkau827e69b2009-11-15 23:09:25 +01001934 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
Jouni Malinenf0ed85c2009-03-03 19:23:31 +02001935 ath9k_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001936 else {
1937 q = skb_get_queue_mapping(skb);
1938 if (q >= 4)
1939 q = 0;
1940
1941 if (--sc->tx.pending_frames[q] < 0)
1942 sc->tx.pending_frames[q] = 0;
1943
Felix Fietkau827e69b2009-11-15 23:09:25 +01001944 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001945 }
Sujithe8324352009-01-16 21:38:42 +05301946}
1947
1948static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001949 struct ath_txq *txq, struct list_head *bf_q,
1950 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301951{
1952 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301953 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301954 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301955
Sujithe8324352009-01-16 21:38:42 +05301956 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301957 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301958
1959 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301960 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301961
1962 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301963 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301964 }
1965
1966 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001967
1968 if (bf->bf_state.bfs_paprd) {
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001969 if (time_after(jiffies,
1970 bf->bf_state.bfs_paprd_timestamp +
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001971 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001972 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001973 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001974 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001975 } else {
1976 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1977 ath_debug_stat_tx(sc, txq, bf, ts);
1978 }
Sujithe8324352009-01-16 21:38:42 +05301979
1980 /*
 1981 * Return the ath_buf list of this MPDU to the free buffer queue
1982 */
1983 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1984 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1985 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1986}
1987
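/*
 * Count the subframes of this buffer chain that were not acknowledged
 * in the block-ack bitmap; if the transmission failed outright, every
 * frame in the chain counts as bad.
 */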
1988static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001989 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301990{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001991 u16 seq_st = 0;
1992 u32 ba[WME_BA_BMP_SIZE >> 5];
Sujithe8324352009-01-16 21:38:42 +05301993 int ba_index;
1994 int nbad = 0;
1995 int isaggr = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001996
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001997 if (bf->bf_lastbf->bf_tx_aborted)
Sujithe8324352009-01-16 21:38:42 +05301998 return 0;
Sujith528f0c62008-10-29 10:14:26 +05301999
Sujithcd3d39a2008-08-11 14:03:34 +05302000 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002001 if (isaggr) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002002 seq_st = ts->ts_seqnum;
2003 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002004 }
2005
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002006 while (bf) {
Sujithe8324352009-01-16 21:38:42 +05302007 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
2008 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
2009 nbad++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002010
Sujithe8324352009-01-16 21:38:42 +05302011 bf = bf->bf_next;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002012 }
2013
Sujithe8324352009-01-16 21:38:42 +05302014 return nbad;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002015}
2016
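/*
 * Translate the hardware TX status into mac80211 rate control feedback:
 * ACK RSSI, filtered/A-MPDU flags, underrun/retry hints and the per-rate
 * try counts.
 */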
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002017static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302018 int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302019{
Sujitha22be222009-03-30 15:28:36 +05302020 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302021 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302022 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01002023 struct ieee80211_hw *hw = bf->aphy->hw;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302024 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302025
Sujith95e4acb2009-03-13 08:56:09 +05302026 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002027 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302028
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002029 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302030 WARN_ON(tx_rateindex >= hw->max_rates);
2031
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002032 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302033 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Felix Fietkaud9698472010-03-01 13:32:11 +01002034 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
2035 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302036
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002037 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302038 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Sujith254ad0f2009-02-04 08:10:19 +05302039 if (ieee80211_is_data(hdr->frame_control)) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002040 if (ts->ts_flags &
Felix Fietkau827e69b2009-11-15 23:09:25 +01002041 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2042 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002043 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2044 (ts->ts_status & ATH9K_TXERR_FIFO))
Felix Fietkau827e69b2009-11-15 23:09:25 +01002045 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2046 tx_info->status.ampdu_len = bf->bf_nframes;
2047 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
Sujithc4288392008-11-18 09:09:30 +05302048 }
2049 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302050
Felix Fietkau545750d2009-11-23 22:21:01 +01002051 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302052 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002053 tx_info->status.rates[i].idx = -1;
2054 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302055
Felix Fietkau78c46532010-06-25 01:26:16 +02002056 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302057}
2058
Sujith059d8062009-01-16 21:38:49 +05302059static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2060{
2061 int qnum;
2062
Felix Fietkau97923b12010-06-12 00:33:55 -04002063 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2064 if (qnum == -1)
2065 return;
2066
Sujith059d8062009-01-16 21:38:49 +05302067 spin_lock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002068 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
2069 ath_mac80211_start_queue(sc, qnum);
2070 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05302071 }
2072 spin_unlock_bh(&txq->axq_lock);
2073}
2074
Sujithc4288392008-11-18 09:09:30 +05302075static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002076{
Sujithcbe61d82009-02-09 13:27:12 +05302077 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002078 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002079 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2080 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302081 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002082 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302083 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002084 int status;
2085
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002086 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2087 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2088 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002089
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002090 for (;;) {
2091 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002092 if (list_empty(&txq->axq_q)) {
2093 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002094 spin_unlock_bh(&txq->axq_lock);
2095 break;
2096 }
2097 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2098
2099 /*
2100 * There is a race condition that a BH gets scheduled
2101 * after sw writes TxE and before hw re-load the last
2102 * descriptor to get the newly chained one.
2103 * Software must keep the last DONE descriptor as a
2104 * holding descriptor - software does so by marking
2105 * it with the STALE flag.
2106 */
2107 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302108 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002109 bf_held = bf;
2110 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302111 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002112 break;
2113 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002114 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302115 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002116 }
2117 }
2118
2119 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302120 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002121
Felix Fietkau29bffa92010-03-29 20:14:23 -07002122 memset(&ts, 0, sizeof(ts));
2123 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002124 if (status == -EINPROGRESS) {
2125 spin_unlock_bh(&txq->axq_lock);
2126 break;
2127 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002128
2129 /*
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002130 * We now know the nullfunc frame has been ACKed so we
2131 * can disable RX.
2132 */
2133 if (bf->bf_isnullfunc &&
Felix Fietkau29bffa92010-03-29 20:14:23 -07002134 (ts.ts_status & ATH9K_TX_ACKED)) {
Senthil Balasubramanian3f7c5c12010-02-03 22:51:13 +05302135 if ((sc->ps_flags & PS_ENABLED))
2136 ath9k_enable_ps(sc);
2137 else
Sujith1b04b932010-01-08 10:36:05 +05302138 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002139 }
2140
2141 /*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002142 * Remove ath_buf's of the same transmit unit from txq,
2143 * however leave the last descriptor back as the holding
2144 * descriptor for hw.
2145 */
Sujitha119cc42009-03-30 15:28:38 +05302146 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002147 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002148 if (!list_is_singular(&lastbf->list))
2149 list_cut_position(&bf_head,
2150 &txq->axq_q, lastbf->list.prev);
2151
2152 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002153 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002154 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002155 if (bf_held)
2156 list_del(&bf_held->list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002157 spin_unlock_bh(&txq->axq_lock);
2158
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002159 if (bf_held)
2160 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002161
Sujithcd3d39a2008-08-11 14:03:34 +05302162 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002163 /*
2164 * This frame is sent out as a single frame.
2165 * Use hardware retry status for this frame.
2166 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002167 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302168 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002169 ath_tx_rc_status(bf, &ts, 0, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002170 }
Johannes Berge6a98542008-10-21 12:40:02 +02002171
Sujithcd3d39a2008-08-11 14:03:34 +05302172 if (bf_isampdu(bf))
Felix Fietkau29bffa92010-03-29 20:14:23 -07002173 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002175 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002176
Sujith059d8062009-01-16 21:38:49 +05302177 ath_wake_mac80211_queue(sc, txq);
2178
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002179 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302180 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002181 ath_txq_schedule(sc, txq);
2182 spin_unlock_bh(&txq->axq_lock);
2183 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002184}
2185
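/*
 * TX watchdog: if a queue still holds frames and has made no progress
 * since the previous poll, assume the hardware hung and reset the chip.
 */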
Sujith305fe472009-07-23 15:32:29 +05302186static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002187{
2188 struct ath_softc *sc = container_of(work, struct ath_softc,
2189 tx_complete_work.work);
2190 struct ath_txq *txq;
2191 int i;
2192 bool needreset = false;
2193
2194 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2195 if (ATH_TXQ_SETUP(sc, i)) {
2196 txq = &sc->tx.txq[i];
2197 spin_lock_bh(&txq->axq_lock);
2198 if (txq->axq_depth) {
2199 if (txq->axq_tx_inprogress) {
2200 needreset = true;
2201 spin_unlock_bh(&txq->axq_lock);
2202 break;
2203 } else {
2204 txq->axq_tx_inprogress = true;
2205 }
2206 }
2207 spin_unlock_bh(&txq->axq_lock);
2208 }
2209
2210 if (needreset) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002211 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2212 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302213 ath9k_ps_wakeup(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002214 ath_reset(sc, false);
Sujith332c5562009-10-09 09:51:28 +05302215 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002216 }
2217
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002218 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002219 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2220}
2221
2222
Sujithe8324352009-01-16 21:38:42 +05302223
2224void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002225{
Sujithe8324352009-01-16 21:38:42 +05302226 int i;
2227 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002228
Sujithe8324352009-01-16 21:38:42 +05302229 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002230
2231 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302232 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2233 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002234 }
2235}
2236
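/*
 * TX completion path for EDMA (AR9003 family) hardware: completion status
 * is read from the dedicated TX status ring rather than from the frame
 * descriptors themselves.
 */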
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002237void ath_tx_edma_tasklet(struct ath_softc *sc)
2238{
2239 struct ath_tx_status txs;
2240 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2241 struct ath_hw *ah = sc->sc_ah;
2242 struct ath_txq *txq;
2243 struct ath_buf *bf, *lastbf;
2244 struct list_head bf_head;
2245 int status;
2246 int txok;
2247
2248 for (;;) {
2249 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2250 if (status == -EINPROGRESS)
2251 break;
2252 if (status == -EIO) {
2253 ath_print(common, ATH_DBG_XMIT,
2254 "Error processing tx status\n");
2255 break;
2256 }
2257
2258 /* Skip beacon completions */
2259 if (txs.qid == sc->beacon.beaconq)
2260 continue;
2261
2262 txq = &sc->tx.txq[txs.qid];
2263
2264 spin_lock_bh(&txq->axq_lock);
2265 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2266 spin_unlock_bh(&txq->axq_lock);
2267 return;
2268 }
2269
2270 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2271 struct ath_buf, list);
2272 lastbf = bf->bf_lastbf;
2273
2274 INIT_LIST_HEAD(&bf_head);
2275 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2276 &lastbf->list);
2277 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2278 txq->axq_depth--;
2279 txq->axq_tx_inprogress = false;
2280 spin_unlock_bh(&txq->axq_lock);
2281
2282 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2283
Vasanthakumar Thiagarajande0f6482010-05-17 18:57:54 -07002284 /*
2285 * Make sure null func frame is acked before configuring
2286 * hw into ps mode.
2287 */
2288 if (bf->bf_isnullfunc && txok) {
2289 if ((sc->ps_flags & PS_ENABLED))
2290 ath9k_enable_ps(sc);
2291 else
2292 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2293 }
2294
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002295 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002296 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2297 bf->bf_state.bf_type |= BUF_XRETRY;
2298 ath_tx_rc_status(bf, &txs, 0, txok, true);
2299 }
2300
2301 if (bf_isampdu(bf))
2302 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2303 else
2304 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2305 &txs, txok, 0);
2306
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002307 ath_wake_mac80211_queue(sc, txq);
2308
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002309 spin_lock_bh(&txq->axq_lock);
2310 if (!list_empty(&txq->txq_fifo_pending)) {
2311 INIT_LIST_HEAD(&bf_head);
2312 bf = list_first_entry(&txq->txq_fifo_pending,
2313 struct ath_buf, list);
2314 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2315 &bf->bf_lastbf->list);
2316 ath_tx_txqaddbuf(sc, txq, &bf_head);
2317 } else if (sc->sc_flags & SC_OP_TXAGGR)
2318 ath_txq_schedule(sc, txq);
2319 spin_unlock_bh(&txq->axq_lock);
2320 }
2321}
2322
Sujithe8324352009-01-16 21:38:42 +05302323/*****************/
2324/* Init, Cleanup */
2325/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002326
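/* Allocate the DMA-coherent ring that EDMA hardware writes TX status into. */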
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002327static int ath_txstatus_setup(struct ath_softc *sc, int size)
2328{
2329 struct ath_descdma *dd = &sc->txsdma;
2330 u8 txs_len = sc->sc_ah->caps.txs_len;
2331
2332 dd->dd_desc_len = size * txs_len;
2333 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2334 &dd->dd_desc_paddr, GFP_KERNEL);
2335 if (!dd->dd_desc)
2336 return -ENOMEM;
2337
2338 return 0;
2339}
2340
2341static int ath_tx_edma_init(struct ath_softc *sc)
2342{
2343 int err;
2344
2345 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2346 if (!err)
2347 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2348 sc->txsdma.dd_desc_paddr,
2349 ATH_TXSTATUS_RING_SIZE);
2350
2351 return err;
2352}
2353
2354static void ath_tx_edma_cleanup(struct ath_softc *sc)
2355{
2356 struct ath_descdma *dd = &sc->txsdma;
2357
2358 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2359 dd->dd_desc_paddr);
2360}
2361
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002362int ath_tx_init(struct ath_softc *sc, int nbufs)
2363{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002364 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002365 int error = 0;
2366
Sujith797fe5cb2009-03-30 15:28:45 +05302367 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002368
Sujith797fe5cb2009-03-30 15:28:45 +05302369 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002370 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302371 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002372 ath_print(common, ATH_DBG_FATAL,
2373 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302374 goto err;
2375 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376
Sujith797fe5cb2009-03-30 15:28:45 +05302377 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002378 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302379 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002380 ath_print(common, ATH_DBG_FATAL,
2381 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302382 goto err;
2383 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002384
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002385 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2386
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002387 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2388 error = ath_tx_edma_init(sc);
2389 if (error)
2390 goto err;
2391 }
2392
Sujith797fe5cb2009-03-30 15:28:45 +05302393err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002394 if (error != 0)
2395 ath_tx_cleanup(sc);
2396
2397 return error;
2398}
2399
Sujith797fe5cb2009-03-30 15:28:45 +05302400void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002401{
Sujithb77f4832008-12-07 21:44:03 +05302402 if (sc->beacon.bdma.dd_desc_len != 0)
2403 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002404
Sujithb77f4832008-12-07 21:44:03 +05302405 if (sc->tx.txdma.dd_desc_len != 0)
2406 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002407
2408 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2409 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002410}
2411
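/*
 * Initialize per-TID aggregation state (BAW, sequence numbers) and
 * per-AC scheduling lists for a new node.
 */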
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002412void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2413{
Sujithc5170162008-10-29 10:13:59 +05302414 struct ath_atx_tid *tid;
2415 struct ath_atx_ac *ac;
2416 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002417
Sujith8ee5afb2008-12-07 21:43:36 +05302418 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302419 tidno < WME_NUM_TID;
2420 tidno++, tid++) {
2421 tid->an = an;
2422 tid->tidno = tidno;
2423 tid->seq_start = tid->seq_next = 0;
2424 tid->baw_size = WME_MAX_BA;
2425 tid->baw_head = tid->baw_tail = 0;
2426 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302427 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302428 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302429 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302430 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302431 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302432 tid->state &= ~AGGR_ADDBA_COMPLETE;
2433 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302434 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002435
Sujith8ee5afb2008-12-07 21:43:36 +05302436 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302437 acno < WME_NUM_AC; acno++, ac++) {
2438 ac->sched = false;
Felix Fietkau1d2231e2010-06-12 00:33:51 -04002439 ac->qnum = sc->tx.hwq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302440 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002441 }
2442}
2443
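/*
 * On node removal, unschedule its TIDs/ACs and drain any frames still
 * sitting in the software queues.
 */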
Sujithb5aa9bf2008-10-29 10:13:31 +05302444void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002445{
Felix Fietkau2b409942010-07-07 19:42:08 +02002446 struct ath_atx_ac *ac;
2447 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002448 struct ath_txq *txq;
Felix Fietkau2b409942010-07-07 19:42:08 +02002449 int i, tidno;
Sujithe8324352009-01-16 21:38:42 +05302450
Felix Fietkau2b409942010-07-07 19:42:08 +02002451 for (tidno = 0, tid = &an->tid[tidno];
2452 tidno < WME_NUM_TID; tidno++, tid++) {
2453 i = tid->ac->qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002454
Felix Fietkau2b409942010-07-07 19:42:08 +02002455 if (!ATH_TXQ_SETUP(sc, i))
2456 continue;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002457
Felix Fietkau2b409942010-07-07 19:42:08 +02002458 txq = &sc->tx.txq[i];
2459 ac = tid->ac;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002460
Felix Fietkau2b409942010-07-07 19:42:08 +02002461 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002462
Felix Fietkau2b409942010-07-07 19:42:08 +02002463 if (tid->sched) {
2464 list_del(&tid->list);
2465 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002466 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002467
2468 if (ac->sched) {
2469 list_del(&ac->list);
2470 tid->ac->sched = false;
2471 }
2472
2473 ath_tid_drain(sc, txq, tid);
2474 tid->state &= ~AGGR_ADDBA_COMPLETE;
2475 tid->state &= ~AGGR_CLEANUP;
2476
2477 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002478 }
2479}