/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define L_STF 8
#define L_LTF 8
#define L_SIG 4
#define HT_SIG 8
#define HT_STF 4
#define HT_LTF(_ns) (4 * (_ns))
#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
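/*
 * Note on the macros above: "_ns" is a symbol count, not nanoseconds.
 * A long-GI HT symbol lasts 4 us and a short-GI symbol 3.6 us, so the
 * half-GI variants use the 18/5 (= 3.6) ratio to stay in integer
 * arithmetic.  For example, 10 symbols take SYMBOL_TIME(10) = 40 us with
 * a long GI and SYMBOL_TIME_HALFGI(10) = 36 us with a short GI.
 */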

#define OFDM_SIFS_TIME 16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{  26,  54 }, /* 0: BPSK */
	{  52, 108 }, /* 1: QPSK 1/2 */
	{  78, 162 }, /* 2: QPSK 3/4 */
	{ 104, 216 }, /* 3: 16-QAM 1/2 */
	{ 156, 324 }, /* 4: 16-QAM 3/4 */
	{ 208, 432 }, /* 5: 64-QAM 2/3 */
	{ 234, 486 }, /* 6: 64-QAM 3/4 */
	{ 260, 540 }, /* 7: 64-QAM 5/6 */
};
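/*
 * bits_per_symbol[] is indexed as [MCS index % 8][0 = 20 MHz, 1 = 40 MHz];
 * the per-stream value is multiplied by the stream count where it is used
 * (see ath_compute_num_delims()) to get the number of data bits carried by
 * one OFDM symbol at that rate.
 */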

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};
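/*
 * ath_max_4ms_framelen[mode][MCS] is the largest frame length (in bytes)
 * that still fits in a 4 ms transmission at that MCS and channel mode
 * (20/40 MHz, long/short GI).  Entries are capped at 65532, in line with
 * the 16-bit aggregate length limit noted in ath_lookup_rate(), which
 * picks the minimum across the configured rate series.
 */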
98
Sujithe8324352009-01-16 21:38:42 +053099/*********************/
100/* Aggregation logic */
101/*********************/
102
Sujithe8324352009-01-16 21:38:42 +0530103static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
104{
105 struct ath_atx_ac *ac = tid->ac;
106
107 if (tid->paused)
108 return;
109
110 if (tid->sched)
111 return;
112
113 tid->sched = true;
114 list_add_tail(&tid->list, &ac->tid_q);
115
116 if (ac->sched)
117 return;
118
119 ac->sched = true;
120 list_add_tail(&ac->list, &txq->axq_acq);
121}
122
123static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
124{
125 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
126
127 spin_lock_bh(&txq->axq_lock);
128 tid->paused++;
129 spin_unlock_bh(&txq->axq_lock);
130}
131
132static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
133{
134 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
135
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -0700136 BUG_ON(tid->paused <= 0);
Sujithe8324352009-01-16 21:38:42 +0530137 spin_lock_bh(&txq->axq_lock);
138
139 tid->paused--;
140
141 if (tid->paused > 0)
142 goto unlock;
143
144 if (list_empty(&tid->buf_q))
145 goto unlock;
146
147 ath_tx_queue_tid(txq, tid);
148 ath_txq_schedule(sc, txq);
149unlock:
150 spin_unlock_bh(&txq->axq_lock);
151}
152
153static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
154{
155 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
156 struct ath_buf *bf;
157 struct list_head bf_head;
158 INIT_LIST_HEAD(&bf_head);
159
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -0700160 BUG_ON(tid->paused <= 0);
Sujithe8324352009-01-16 21:38:42 +0530161 spin_lock_bh(&txq->axq_lock);
162
163 tid->paused--;
164
165 if (tid->paused > 0) {
166 spin_unlock_bh(&txq->axq_lock);
167 return;
168 }
169
170 while (!list_empty(&tid->buf_q)) {
171 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -0700172 BUG_ON(bf_isretried(bf));
Sujithd43f30152009-01-16 21:38:53 +0530173 list_move_tail(&bf->list, &bf_head);
Sujithc37452b2009-03-09 09:31:57 +0530174 ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +0530175 }
176
177 spin_unlock_bh(&txq->axq_lock);
178}
179
180static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
181 int seqno)
182{
183 int index, cindex;
184
185 index = ATH_BA_INDEX(tid->seq_start, seqno);
186 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
187
188 tid->tx_buf[cindex] = NULL;
189
190 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
191 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
192 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
193 }
194}
195
196static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
197 struct ath_buf *bf)
198{
199 int index, cindex;
200
201 if (bf_isretried(bf))
202 return;
203
204 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
205 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
206
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -0700207 BUG_ON(tid->tx_buf[cindex] != NULL);
Sujithe8324352009-01-16 21:38:42 +0530208 tid->tx_buf[cindex] = bf;
209
210 if (index >= ((tid->baw_tail - tid->baw_head) &
211 (ATH_TID_MAX_BUFS - 1))) {
212 tid->baw_tail = cindex;
213 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
214 }
215}
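/*
 * ath_tx_update_baw() and ath_tx_addto_baw() maintain the software view of
 * the block-ack window as a circular buffer of ATH_TID_MAX_BUFS slots:
 * a frame's slot is (baw_head + distance of its seqno from seq_start),
 * masked to the buffer size.  Completing the frame at the head lets
 * seq_start and baw_head slide forward past every already-completed slot.
 */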

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 0, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate; if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
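/*
 * Example (numbers taken from ath_max_4ms_framelen above): if every entry
 * in the rate series is HT20, long GI, MCS 7, the lookup yields
 * ath_max_4ms_framelen[MCS_HT20][7] = 32172 bytes.  That value is reduced
 * to 3/8 when SC_OP_BT_PRIORITY_DETECTED is set, and then clamped by
 * ATH_AMPDU_LIMIT_MAX and the peer's advertised maxampdu.
 */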

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
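/*
 * Illustrative walk-through of the density math above (numbers only):
 * with an 8 us MPDU density and full GI, NUM_SYMBOLS_PER_USEC(8) = 2
 * symbols; at MCS 7, HT20, single stream that is 2 * 260 bits = 520 bits
 * = 65 bytes, so any subframe shorter than 65 bytes gets additional
 * delimiters padded in front of it.
 */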

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		       u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txtid->state |= AGGR_ADDBA_PROGRESS;
	ath_tx_pause_tid(sc, txtid);
	*ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_tx_status ts;
	struct ath_buf *bf;
	struct list_head bf_head;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: this relies on the assumption that a
			 * software-retried frame always stays at the
			 * head of the software queue.
			 */
			break;
		}
		list_move_tail(&bf->list, &bf_head);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}
	spin_unlock_bh(&txq->axq_lock);

	if (txtid->baw_head != txtid->baw_tail) {
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		ath_tx_flush_tid(sc, txtid);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}
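/*
 * cabqReadytime is interpreted as a percentage of the beacon interval:
 * e.g. cabqReadytime = 10 programs the CAB queue ready time to 10% of
 * sc->beacon_interval, after clamping the percentage to the
 * [ATH9K_READY_TIME_LO_BOUND, ATH9K_READY_TIME_HI_BOUND] range above.
 */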
1050
Sujith043a0402009-01-16 21:38:47 +05301051/*
1052 * Drain a given TX queue (could be Beacon or Data)
1053 *
1054 * This assumes output has been stopped and
1055 * we do not need to block ath_tx_tasklet.
1056 */
1057void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301058{
1059 struct ath_buf *bf, *lastbf;
1060 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001061 struct ath_tx_status ts;
1062
1063 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301064 INIT_LIST_HEAD(&bf_head);
1065
Sujithe8324352009-01-16 21:38:42 +05301066 for (;;) {
1067 spin_lock_bh(&txq->axq_lock);
1068
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001069 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1070 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1071 txq->txq_headidx = txq->txq_tailidx = 0;
1072 spin_unlock_bh(&txq->axq_lock);
1073 break;
1074 } else {
1075 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1076 struct ath_buf, list);
1077 }
1078 } else {
1079 if (list_empty(&txq->axq_q)) {
1080 txq->axq_link = NULL;
1081 spin_unlock_bh(&txq->axq_lock);
1082 break;
1083 }
1084 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1085 list);
Sujithe8324352009-01-16 21:38:42 +05301086
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001087 if (bf->bf_stale) {
1088 list_del(&bf->list);
1089 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301090
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001091 ath_tx_return_buffer(sc, bf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001092 continue;
1093 }
Sujithe8324352009-01-16 21:38:42 +05301094 }
1095
1096 lastbf = bf->bf_lastbf;
Vasanthakumar Thiagarajan6d913f72010-04-15 17:38:46 -04001097 if (!retry_tx)
1098 lastbf->bf_tx_aborted = true;
Sujithe8324352009-01-16 21:38:42 +05301099
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001100 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1101 list_cut_position(&bf_head,
1102 &txq->txq_fifo[txq->txq_tailidx],
1103 &lastbf->list);
1104 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1105 } else {
1106 /* remove ath_buf's of the same mpdu from txq */
1107 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1108 }
1109
Sujithe8324352009-01-16 21:38:42 +05301110 txq->axq_depth--;
1111
1112 spin_unlock_bh(&txq->axq_lock);
1113
1114 if (bf_isampdu(bf))
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001115 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
Sujithe8324352009-01-16 21:38:42 +05301116 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001117 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +05301118 }
1119
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001120 spin_lock_bh(&txq->axq_lock);
1121 txq->axq_tx_inprogress = false;
1122 spin_unlock_bh(&txq->axq_lock);
1123
Sujithe8324352009-01-16 21:38:42 +05301124 /* flush any pending frames if aggregation is enabled */
1125 if (sc->sc_flags & SC_OP_TXAGGR) {
1126 if (!retry_tx) {
1127 spin_lock_bh(&txq->axq_lock);
1128 ath_txq_drain_pending_buffers(sc, txq);
1129 spin_unlock_bh(&txq->axq_lock);
1130 }
1131 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001132
1133 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1134 spin_lock_bh(&txq->axq_lock);
1135 while (!list_empty(&txq->txq_fifo_pending)) {
1136 bf = list_first_entry(&txq->txq_fifo_pending,
1137 struct ath_buf, list);
1138 list_cut_position(&bf_head,
1139 &txq->txq_fifo_pending,
1140 &bf->bf_lastbf->list);
1141 spin_unlock_bh(&txq->axq_lock);
1142
1143 if (bf_isampdu(bf))
1144 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
1145 &ts, 0);
1146 else
1147 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1148 &ts, 0, 0);
1149 spin_lock_bh(&txq->axq_lock);
1150 }
1151 spin_unlock_bh(&txq->axq_lock);
1152 }
Sujithe8324352009-01-16 21:38:42 +05301153}
1154
Sujith043a0402009-01-16 21:38:47 +05301155void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1156{
Sujithcbe61d82009-02-09 13:27:12 +05301157 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001158 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301159 struct ath_txq *txq;
1160 int i, npend = 0;
1161
1162 if (sc->sc_flags & SC_OP_INVALID)
1163 return;
1164
1165 /* Stop beacon queue */
1166 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1167
1168 /* Stop data queues */
1169 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1170 if (ATH_TXQ_SETUP(sc, i)) {
1171 txq = &sc->tx.txq[i];
1172 ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1173 npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
1174 }
1175 }
1176
1177 if (npend) {
1178 int r;
1179
Sujithe8009e92009-12-14 14:57:08 +05301180 ath_print(common, ATH_DBG_FATAL,
Justin P. Mattock9be8ab22010-05-26 11:00:04 -07001181 "Failed to stop TX DMA. Resetting hardware!\n");
Sujith043a0402009-01-16 21:38:47 +05301182
1183 spin_lock_bh(&sc->sc_resetlock);
Sujithe8009e92009-12-14 14:57:08 +05301184 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
Sujith043a0402009-01-16 21:38:47 +05301185 if (r)
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001186 ath_print(common, ATH_DBG_FATAL,
1187 "Unable to reset hardware; reset status %d\n",
1188 r);
Sujith043a0402009-01-16 21:38:47 +05301189 spin_unlock_bh(&sc->sc_resetlock);
1190 }
1191
1192 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1193 if (ATH_TXQ_SETUP(sc, i))
1194 ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
1195 }
1196}
1197
Sujithe8324352009-01-16 21:38:42 +05301198void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1199{
1200 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1201 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1202}
1203
Sujithe8324352009-01-16 21:38:42 +05301204void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1205{
1206 struct ath_atx_ac *ac;
1207 struct ath_atx_tid *tid;
1208
1209 if (list_empty(&txq->axq_acq))
1210 return;
1211
1212 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1213 list_del(&ac->list);
1214 ac->sched = false;
1215
1216 do {
1217 if (list_empty(&ac->tid_q))
1218 return;
1219
1220 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
1221 list_del(&tid->list);
1222 tid->sched = false;
1223
1224 if (tid->paused)
1225 continue;
1226
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001227 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301228
1229 /*
1230 * add tid to round-robin queue if more frames
1231 * are pending for the tid
1232 */
1233 if (!list_empty(&tid->buf_q))
1234 ath_tx_queue_tid(txq, tid);
1235
1236 break;
1237 } while (!list_empty(&ac->tid_q));
1238
1239 if (!list_empty(&ac->tid_q)) {
1240 if (!ac->sched) {
1241 ac->sched = true;
1242 list_add_tail(&ac->list, &txq->axq_acq);
1243 }
1244 }
1245}
1246
1247int ath_tx_setup(struct ath_softc *sc, int haltype)
1248{
1249 struct ath_txq *txq;
1250
1251 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001252 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1253 "HAL AC %u out of range, max %zu!\n",
Sujithe8324352009-01-16 21:38:42 +05301254 haltype, ARRAY_SIZE(sc->tx.hwq_map));
1255 return 0;
1256 }
1257 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1258 if (txq != NULL) {
1259 sc->tx.hwq_map[haltype] = txq->axq_qnum;
1260 return 1;
1261 } else
1262 return 0;
1263}
1264
1265/***********/
1266/* TX, DMA */
1267/***********/
1268
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001269/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001270 * Insert a chain of ath_buf (descriptors) on a txq and
1271 * assume the descriptors are already chained together by caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001272 */
Sujith102e0572008-10-29 10:15:16 +05301273static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1274 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001275{
Sujithcbe61d82009-02-09 13:27:12 +05301276 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001277 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001278 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +05301279
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001280 /*
1281 * Insert the frame on the outbound list and
1282 * pass it on to the hardware.
1283 */
1284
1285 if (list_empty(head))
1286 return;
1287
1288 bf = list_first_entry(head, struct ath_buf, list);
1289
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001290 ath_print(common, ATH_DBG_QUEUE,
1291 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001292
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001293 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1294 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1295 list_splice_tail_init(head, &txq->txq_fifo_pending);
1296 return;
1297 }
1298 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
1299 ath_print(common, ATH_DBG_XMIT,
1300 "Initializing tx fifo %d which "
1301 "is non-empty\n",
1302 txq->txq_headidx);
1303 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1304 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1305 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001306 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001307 ath_print(common, ATH_DBG_XMIT,
1308 "TXDP[%u] = %llx (%p)\n",
1309 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001310 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001311 list_splice_tail_init(head, &txq->axq_q);
1312
1313 if (txq->axq_link == NULL) {
1314 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1315 ath_print(common, ATH_DBG_XMIT,
1316 "TXDP[%u] = %llx (%p)\n",
1317 txq->axq_qnum, ito64(bf->bf_daddr),
1318 bf->bf_desc);
1319 } else {
1320 *txq->axq_link = bf->bf_daddr;
1321 ath_print(common, ATH_DBG_XMIT,
1322 "link[%u] (%p)=%llx (%p)\n",
1323 txq->axq_qnum, txq->axq_link,
1324 ito64(bf->bf_daddr), bf->bf_desc);
1325 }
1326 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1327 &txq->axq_link);
1328 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001329 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001330 txq->axq_depth++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001331}
1332
Sujithe8324352009-01-16 21:38:42 +05301333static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1334 struct list_head *bf_head,
1335 struct ath_tx_control *txctl)
1336{
1337 struct ath_buf *bf;
1338
Sujithe8324352009-01-16 21:38:42 +05301339 bf = list_first_entry(bf_head, struct ath_buf, list);
1340 bf->bf_state.bf_type |= BUF_AMPDU;
Sujithfec247c2009-07-27 12:08:16 +05301341 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
Sujithe8324352009-01-16 21:38:42 +05301342
1343 /*
1344 * Do not queue to h/w when any of the following conditions is true:
1345 * - there are pending frames in software queue
1346 * - the TID is currently paused for ADDBA/BAR request
1347 * - seqno is not within block-ack window
1348 * - h/w queue depth exceeds low water mark
1349 */
1350 if (!list_empty(&tid->buf_q) || tid->paused ||
1351 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1352 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001353 /*
Sujithe8324352009-01-16 21:38:42 +05301354 * Add this frame to software queue for scheduling later
1355 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001356 */
Sujithd43f30152009-01-16 21:38:53 +05301357 list_move_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301358 ath_tx_queue_tid(txctl->txq, tid);
1359 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001360 }
1361
Sujithe8324352009-01-16 21:38:42 +05301362 /* Add sub-frame to BAW */
1363 ath_tx_addto_baw(sc, tid, bf);
1364
1365 /* Queue to h/w without aggregation */
1366 bf->bf_nframes = 1;
Sujithd43f30152009-01-16 21:38:53 +05301367 bf->bf_lastbf = bf;
Sujithe8324352009-01-16 21:38:42 +05301368 ath_buf_set_rate(sc, bf);
1369 ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
Sujithc4288392008-11-18 09:09:30 +05301370}
1371
Sujithc37452b2009-03-09 09:31:57 +05301372static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
1373 struct ath_atx_tid *tid,
1374 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001375{
Sujithe8324352009-01-16 21:38:42 +05301376 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001377
Sujithe8324352009-01-16 21:38:42 +05301378 bf = list_first_entry(bf_head, struct ath_buf, list);
1379 bf->bf_state.bf_type &= ~BUF_AMPDU;
1380
1381 /* update starting sequence number for subsequent ADDBA request */
1382 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1383
1384 bf->bf_nframes = 1;
Sujithd43f30152009-01-16 21:38:53 +05301385 bf->bf_lastbf = bf;
Sujithe8324352009-01-16 21:38:42 +05301386 ath_buf_set_rate(sc, bf);
1387 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301388 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001389}
1390
Sujithc37452b2009-03-09 09:31:57 +05301391static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1392 struct list_head *bf_head)
1393{
1394 struct ath_buf *bf;
1395
1396 bf = list_first_entry(bf_head, struct ath_buf, list);
1397
1398 bf->bf_lastbf = bf;
1399 bf->bf_nframes = 1;
1400 ath_buf_set_rate(sc, bf);
1401 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301402 TX_STAT_INC(txq->axq_qnum, queued);
Sujithc37452b2009-03-09 09:31:57 +05301403}
1404
Sujith528f0c62008-10-29 10:14:26 +05301405static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001406{
Sujith528f0c62008-10-29 10:14:26 +05301407 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001408 enum ath9k_pkt_type htype;
1409 __le16 fc;
1410
Sujith528f0c62008-10-29 10:14:26 +05301411 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001412 fc = hdr->frame_control;
1413
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001414 if (ieee80211_is_beacon(fc))
1415 htype = ATH9K_PKT_TYPE_BEACON;
1416 else if (ieee80211_is_probe_resp(fc))
1417 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1418 else if (ieee80211_is_atim(fc))
1419 htype = ATH9K_PKT_TYPE_ATIM;
1420 else if (ieee80211_is_pspoll(fc))
1421 htype = ATH9K_PKT_TYPE_PSPOLL;
1422 else
1423 htype = ATH9K_PKT_TYPE_NORMAL;
1424
1425 return htype;
1426}
1427
Sujith528f0c62008-10-29 10:14:26 +05301428static int get_hw_crypto_keytype(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001429{
Sujith528f0c62008-10-29 10:14:26 +05301430 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1431
1432 if (tx_info->control.hw_key) {
1433 if (tx_info->control.hw_key->alg == ALG_WEP)
1434 return ATH9K_KEY_TYPE_WEP;
1435 else if (tx_info->control.hw_key->alg == ALG_TKIP)
1436 return ATH9K_KEY_TYPE_TKIP;
1437 else if (tx_info->control.hw_key->alg == ALG_CCMP)
1438 return ATH9K_KEY_TYPE_AES;
1439 }
1440
1441 return ATH9K_KEY_TYPE_CLEAR;
1442}
1443
Sujith528f0c62008-10-29 10:14:26 +05301444static void assign_aggr_tid_seqno(struct sk_buff *skb,
1445 struct ath_buf *bf)
1446{
1447 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1448 struct ieee80211_hdr *hdr;
1449 struct ath_node *an;
1450 struct ath_atx_tid *tid;
1451 __le16 fc;
1452 u8 *qc;
1453
1454 if (!tx_info->control.sta)
1455 return;
1456
1457 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1458 hdr = (struct ieee80211_hdr *)skb->data;
1459 fc = hdr->frame_control;
1460
Sujith528f0c62008-10-29 10:14:26 +05301461 if (ieee80211_is_data_qos(fc)) {
1462 qc = ieee80211_get_qos_ctl(hdr);
1463 bf->bf_tidno = qc[0] & 0xf;
Sujith98deeea2008-08-11 14:05:46 +05301464 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001465
Sujithe8324352009-01-16 21:38:42 +05301466 /*
1467 * For HT capable stations, we save tidno for later use.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301468	 * We also override the seqno set by the upper layer with
1469	 * the one in the tx aggregation state.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301470 */
1471 tid = ATH_AN_2_TID(an, bf->bf_tidno);
Sujith17b182e2009-12-14 14:56:56 +05301472 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301473 bf->bf_seqno = tid->seq_next;
1474 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
Sujith528f0c62008-10-29 10:14:26 +05301475}
1476
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001477static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
Sujith528f0c62008-10-29 10:14:26 +05301478{
1479 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1480 int flags = 0;
1481
1482 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1483 flags |= ATH9K_TXDESC_INTREQ;
1484
1485 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1486 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301487
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001488 if (use_ldpc)
1489 flags |= ATH9K_TXDESC_LDPC;
1490
Sujith528f0c62008-10-29 10:14:26 +05301491 return flags;
1492}
1493
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001494/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001495 * rix - rate index
1496 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1497 * width - 0 for 20 MHz, 1 for 40 MHz
1498 * half_gi - use a 3.6 us symbol time (short GI) instead of 4 us
1499 */
Sujith102e0572008-10-29 10:15:16 +05301500static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1501 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001502{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001503 u32 nbits, nsymbits, duration, nsymbols;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001504 int streams, pktlen;
1505
Sujithcd3d39a2008-08-11 14:03:34 +05301506 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +05301507
1508 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001509 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001510 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001511 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001512 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1513
1514 if (!half_gi)
1515 duration = SYMBOL_TIME(nsymbols);
1516 else
1517 duration = SYMBOL_TIME_HALFGI(nsymbols);
1518
Sujithe63835b2008-11-18 09:07:53 +05301519	/* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001520 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301521
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001522 return duration;
1523}
1524
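/*
 * Editor's note: a standalone sketch (not driver code) making the symbol
 * arithmetic of ath_pkt_duration() above concrete. The per-symbol bit
 * count and the preamble overhead quoted in the comments are standard
 * 802.11n values supplied by the caller here, not read from the driver's
 * rate tables.
 */
#include <stdio.h>

static unsigned int ht_data_airtime(unsigned int pktlen_bytes,
				    unsigned int bits_per_sym, /* per stream */
				    unsigned int streams, int half_gi)
{
	unsigned int nbits = (pktlen_bytes << 3) + 22;	/* payload + OFDM PLCP bits */
	unsigned int nsymbits = bits_per_sym * streams;
	unsigned int nsymbols = (nbits + nsymbits - 1) / nsymbits; /* round up */

	/* 4 us per symbol with the long GI, 3.6 us with the short GI */
	return half_gi ? (nsymbols * 18 + 4) / 5 : nsymbols * 4;
}

int main(void)
{
	/*
	 * 1500-byte MPDU at MCS 7 (260 data bits per symbol, one stream),
	 * 20 MHz, long GI: 47 symbols -> 188 us of data time, on top of
	 * which the driver adds roughly 36 us of legacy/HT training and
	 * signal fields for a single stream.
	 */
	printf("%u us\n", ht_data_airtime(1500, 260, 1, 0));
	return 0;
}
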
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001525static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1526{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001527 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001528 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301529 struct sk_buff *skb;
1530 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301531 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001532 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301533 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301534 int i, flags = 0;
1535 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301536 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301537
1538 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301539
Sujitha22be222009-03-30 15:28:36 +05301540 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301541 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301542 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301543 hdr = (struct ieee80211_hdr *)skb->data;
1544 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301545
Sujithc89424d2009-01-30 14:29:28 +05301546 /*
1547	 * Whether Short Preamble is needed for the CTS rate is
1548	 * determined from the BSS's global flag.
1549	 * For the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used instead.
1550 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001551 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1552 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301553 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001554 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001555
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001556 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001557 bool is_40, is_sgi, is_sp;
1558 int phy;
1559
Sujithe63835b2008-11-18 09:07:53 +05301560 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001561 continue;
1562
Sujitha8efee42008-11-18 09:07:30 +05301563 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301564 series[i].Tries = rates[i].count;
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001565 series[i].ChSel = common->tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001566
Felix Fietkau27032052010-01-17 21:08:50 +01001567 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1568 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301569 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001570 flags |= ATH9K_TXDESC_RTSENA;
1571 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1572 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1573 flags |= ATH9K_TXDESC_CTSENA;
1574 }
1575
Sujithc89424d2009-01-30 14:29:28 +05301576 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1577 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1578 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1579 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001580
Felix Fietkau545750d2009-11-23 22:21:01 +01001581 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1582 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1583 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1584
1585 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1586 /* MCS rates */
1587 series[i].Rate = rix | 0x80;
1588 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1589 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001590 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1591 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001592 continue;
1593 }
1594
1595		/* legacy rates */
1596 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1597 !(rate->flags & IEEE80211_RATE_ERP_G))
1598 phy = WLAN_RC_PHY_CCK;
1599 else
1600 phy = WLAN_RC_PHY_OFDM;
1601
1602 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1603 series[i].Rate = rate->hw_value;
1604 if (rate->hw_value_short) {
1605 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1606 series[i].Rate |= rate->hw_value_short;
1607 } else {
1608 is_sp = false;
1609 }
1610
1611 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1612 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001613 }
1614
Felix Fietkau27032052010-01-17 21:08:50 +01001615 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1616 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1617 flags &= ~ATH9K_TXDESC_RTSENA;
1618
1619 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1620 if (flags & ATH9K_TXDESC_RTSENA)
1621 flags &= ~ATH9K_TXDESC_CTSENA;
1622
Sujithe63835b2008-11-18 09:07:53 +05301623 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301624 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1625 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301626 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301627 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301628
Sujith17d79042009-02-09 13:27:03 +05301629 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301630 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001631}
1632
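/*
 * Editor's note: a standalone sketch (not driver code) of the two
 * protection-flag rules applied at the end of ath_buf_set_rate() above:
 * RTS is dropped for aggregates longer than the hardware's RTS limit,
 * and RTS and CTS-to-self never stay enabled together. The SK_* values
 * are illustrative stand-ins, not the real ATH9K_TXDESC_* constants.
 */
#include <stdbool.h>
#include <stdint.h>

#define SK_RTSENA 0x1
#define SK_CTSENA 0x2

static uint32_t fixup_prot_flags(uint32_t flags, bool is_aggr,
				 uint32_t aggr_len, uint32_t rts_aggr_limit)
{
	/* RTS cannot protect an aggregate larger than the hardware limit */
	if (is_aggr && aggr_len > rts_aggr_limit)
		flags &= ~SK_RTSENA;

	/* RTS and CTS-to-self are mutually exclusive; prefer RTS */
	if (flags & SK_RTSENA)
		flags &= ~SK_CTSENA;

	return flags;
}
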
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001633static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
Sujithe8324352009-01-16 21:38:42 +05301634 struct sk_buff *skb,
1635 struct ath_tx_control *txctl)
1636{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001637 struct ath_wiphy *aphy = hw->priv;
1638 struct ath_softc *sc = aphy->sc;
Sujithe8324352009-01-16 21:38:42 +05301639 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1640 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301641 int hdrlen;
1642 __le16 fc;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001643 int padpos, padsize;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001644 bool use_ldpc = false;
Sujithe8324352009-01-16 21:38:42 +05301645
Felix Fietkau827e69b2009-11-15 23:09:25 +01001646 tx_info->pad[0] = 0;
1647 switch (txctl->frame_type) {
Pavel Roskinc81494d2010-03-31 18:05:25 -04001648 case ATH9K_IFT_NOT_INTERNAL:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001649 break;
Pavel Roskinc81494d2010-03-31 18:05:25 -04001650 case ATH9K_IFT_PAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001651 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1652 /* fall through */
Pavel Roskinc81494d2010-03-31 18:05:25 -04001653 case ATH9K_IFT_UNPAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001654 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1655 break;
1656 }
Sujithe8324352009-01-16 21:38:42 +05301657 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1658 fc = hdr->frame_control;
1659
1660 ATH_TXBUF_RESET(bf);
1661
Felix Fietkau827e69b2009-11-15 23:09:25 +01001662 bf->aphy = aphy;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001663 bf->bf_frmlen = skb->len + FCS_LEN;
1664 /* Remove the padding size from bf_frmlen, if any */
1665 padpos = ath9k_cmn_padpos(hdr->frame_control);
1666 padsize = padpos & 3;
1667	if (padsize && skb->len > padpos + padsize) {
1668 bf->bf_frmlen -= padsize;
1669 }
Sujithe8324352009-01-16 21:38:42 +05301670
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001671 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
Sujithc656bbb2009-01-16 21:38:56 +05301672 bf->bf_state.bf_type |= BUF_HT;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001673 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1674 use_ldpc = true;
1675 }
Sujithe8324352009-01-16 21:38:42 +05301676
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001677 bf->bf_state.bfs_paprd = txctl->paprd;
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001678 if (txctl->paprd)
1679 bf->bf_state.bfs_paprd_timestamp = jiffies;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001680 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
Sujithe8324352009-01-16 21:38:42 +05301681
1682 bf->bf_keytype = get_hw_crypto_keytype(skb);
Sujithe8324352009-01-16 21:38:42 +05301683 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1684 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1685 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1686 } else {
1687 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1688 }
1689
Sujith17b182e2009-12-14 14:56:56 +05301690 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1691 (sc->sc_flags & SC_OP_TXAGGR))
Sujithe8324352009-01-16 21:38:42 +05301692 assign_aggr_tid_seqno(skb, bf);
1693
1694 bf->bf_mpdu = skb;
1695
1696 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
1697 skb->len, DMA_TO_DEVICE);
1698 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
1699 bf->bf_mpdu = NULL;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001700 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1701 "dma_mapping_error() on TX\n");
Sujithe8324352009-01-16 21:38:42 +05301702 return -ENOMEM;
1703 }
1704
1705 bf->bf_buf_addr = bf->bf_dmacontext;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001706
1707 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1708 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1709 bf->bf_isnullfunc = true;
Sujith1b04b932010-01-08 10:36:05 +05301710 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001711 } else
1712 bf->bf_isnullfunc = false;
1713
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001714 bf->bf_tx_aborted = false;
1715
Sujithe8324352009-01-16 21:38:42 +05301716 return 0;
1717}
1718
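/*
 * Editor's note: a standalone sketch (not driver code) of the header
 * padding bookkeeping done in ath_tx_setup_buffer() above. mac80211 pads
 * the 802.11 header to a 4-byte boundary; ath9k_cmn_padpos() reports the
 * offset where that padding sits (e.g. 26 for a QoS data frame), so the
 * (padpos & 3) pad bytes are part of skb->len but must not count toward
 * the over-the-air frame length. FCS_LEN is assumed to be 4 here.
 */
#include <stdio.h>

static unsigned int airtime_frame_len(unsigned int skb_len, unsigned int padpos)
{
	unsigned int padsize = padpos & 3;
	unsigned int frmlen = skb_len + 4;		/* + FCS */

	if (padsize && skb_len > padpos + padsize)
		frmlen -= padsize;			/* drop header padding */

	return frmlen;
}

int main(void)
{
	/* 1500-byte skb carrying a QoS data frame: padpos 26, padsize 2 */
	printf("%u\n", airtime_frame_len(1500, 26));
	return 0;
}
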
1719/* FIXME: tx power */
1720static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1721 struct ath_tx_control *txctl)
1722{
Sujitha22be222009-03-30 15:28:36 +05301723 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301724 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithc37452b2009-03-09 09:31:57 +05301725 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301726 struct ath_node *an = NULL;
1727 struct list_head bf_head;
1728 struct ath_desc *ds;
1729 struct ath_atx_tid *tid;
Sujithcbe61d82009-02-09 13:27:12 +05301730 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301731 int frm_type;
Sujithc37452b2009-03-09 09:31:57 +05301732 __le16 fc;
Sujithe8324352009-01-16 21:38:42 +05301733
1734 frm_type = get_hw_packet_type(skb);
Sujithc37452b2009-03-09 09:31:57 +05301735 fc = hdr->frame_control;
Sujithe8324352009-01-16 21:38:42 +05301736
1737 INIT_LIST_HEAD(&bf_head);
1738 list_add_tail(&bf->list, &bf_head);
1739
1740 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001741 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301742
1743 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1744 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1745
1746 ath9k_hw_filltxdesc(ah, ds,
1747 skb->len, /* segment length */
1748 true, /* first segment */
1749 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001750 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001751 bf->bf_buf_addr,
1752 txctl->txq->axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301753
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001754 if (bf->bf_state.bfs_paprd)
1755 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1756
Sujithe8324352009-01-16 21:38:42 +05301757 spin_lock_bh(&txctl->txq->axq_lock);
1758
1759 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1760 tx_info->control.sta) {
1761 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1762 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1763
Sujithc37452b2009-03-09 09:31:57 +05301764 if (!ieee80211_is_data_qos(fc)) {
1765 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1766 goto tx_done;
1767 }
1768
Felix Fietkau4fdec032010-03-12 04:02:43 +01001769 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Sujithe8324352009-01-16 21:38:42 +05301770 /*
1771 * Try aggregation if it's a unicast data frame
1772 * and the destination is HT capable.
1773 */
1774 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1775 } else {
1776 /*
1777			 * Send this frame as a regular frame when the ADDBA
1778			 * exchange is neither complete nor pending.
1779 */
Sujithc37452b2009-03-09 09:31:57 +05301780 ath_tx_send_ht_normal(sc, txctl->txq,
1781 tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301782 }
1783 } else {
Sujithc37452b2009-03-09 09:31:57 +05301784 ath_tx_send_normal(sc, txctl->txq, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301785 }
1786
Sujithc37452b2009-03-09 09:31:57 +05301787tx_done:
Sujithe8324352009-01-16 21:38:42 +05301788 spin_unlock_bh(&txctl->txq->axq_lock);
1789}
1790
1791/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001792int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301793 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001794{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001795 struct ath_wiphy *aphy = hw->priv;
1796 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001797 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau84642d62010-06-01 21:33:13 +02001798 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001799 struct ath_buf *bf;
Felix Fietkau97923b12010-06-12 00:33:55 -04001800 int q, r;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001801
Sujithe8324352009-01-16 21:38:42 +05301802 bf = ath_tx_get_buffer(sc);
1803 if (!bf) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001804 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
Sujithe8324352009-01-16 21:38:42 +05301805 return -1;
1806 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001807
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001808 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301809 if (unlikely(r)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001810 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001811
Sujithe8324352009-01-16 21:38:42 +05301812 /* upon ath_tx_processq() this TX queue will be resumed, we
1813 * guarantee this will happen by knowing beforehand that
1814	 * we will at least have to run TX completion on one buffer
1815 * on the queue */
1816 spin_lock_bh(&txq->axq_lock);
Felix Fietkau84642d62010-06-01 21:33:13 +02001817 if (!txq->stopped && txq->axq_depth > 1) {
Luis R. Rodriguezf52de032009-11-02 17:09:12 -08001818 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
Sujithe8324352009-01-16 21:38:42 +05301819 txq->stopped = 1;
1820 }
1821 spin_unlock_bh(&txq->axq_lock);
1822
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001823 ath_tx_return_buffer(sc, bf);
Sujithe8324352009-01-16 21:38:42 +05301824
1825 return r;
1826 }
1827
Felix Fietkau97923b12010-06-12 00:33:55 -04001828 q = skb_get_queue_mapping(skb);
1829 if (q >= 4)
1830 q = 0;
1831
1832 spin_lock_bh(&txq->axq_lock);
1833 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1834 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1835 txq->stopped = 1;
1836 }
1837 spin_unlock_bh(&txq->axq_lock);
1838
Sujithe8324352009-01-16 21:38:42 +05301839 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001840
1841 return 0;
1842}
1843
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001844void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001845{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001846 struct ath_wiphy *aphy = hw->priv;
1847 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001848 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001849 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1850 int padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301851 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1852 struct ath_tx_control txctl;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001853
Sujithe8324352009-01-16 21:38:42 +05301854 memset(&txctl, 0, sizeof(struct ath_tx_control));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001855
Sujithe8324352009-01-16 21:38:42 +05301856 /*
1857 * As a temporary workaround, assign seq# here; this will likely need
1858 * to be cleaned up to work better with Beacon transmission and virtual
1859 * BSSes.
1860 */
1861 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
Sujithe8324352009-01-16 21:38:42 +05301862 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1863 sc->tx.seq_no += 0x10;
1864 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1865 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001866 }
1867
Sujithe8324352009-01-16 21:38:42 +05301868 /* Add the padding after the header if this is not already done */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001869 padpos = ath9k_cmn_padpos(hdr->frame_control);
1870 padsize = padpos & 3;
1871	if (padsize && skb->len > padpos) {
Sujithe8324352009-01-16 21:38:42 +05301872 if (skb_headroom(skb) < padsize) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001873 ath_print(common, ATH_DBG_XMIT,
1874 "TX CABQ padding failed\n");
Sujithe8324352009-01-16 21:38:42 +05301875 dev_kfree_skb_any(skb);
1876 return;
1877 }
1878 skb_push(skb, padsize);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001879 memmove(skb->data, skb->data + padsize, padpos);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001880 }
1881
Sujithe8324352009-01-16 21:38:42 +05301882 txctl.txq = sc->beacon.cabq;
1883
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001884 ath_print(common, ATH_DBG_XMIT,
1885 "transmitting CABQ packet, skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301886
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001887 if (ath_tx_start(hw, skb, &txctl) != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001888 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
Sujithe8324352009-01-16 21:38:42 +05301889 goto exit;
1890 }
1891
1892 return;
1893exit:
1894 dev_kfree_skb_any(skb);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001895}
1896
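/*
 * Editor's note: a standalone sketch (not driver code) of the seq_ctrl
 * bookkeeping used by the workaround in ath_tx_cabq()/ath_tx_start(): the
 * low four bits (IEEE80211_SCTL_FRAG, 0x000f) carry the fragment number
 * and the upper twelve the sequence number, which is why the driver steps
 * sc->tx.seq_no by 0x10 and ORs it on top of the preserved fragment bits.
 * Byte-order handling (cpu_to_le16) is omitted here.
 */
#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000f

static uint16_t next_seq_ctrl(uint16_t *seq_no, uint16_t old_seq_ctrl,
			      int first_fragment)
{
	if (first_fragment)
		*seq_no += 0x10;	/* advance the 12-bit sequence number */

	return (uint16_t)((old_seq_ctrl & SCTL_FRAG) | *seq_no);
}

int main(void)
{
	uint16_t seq_no = 0;
	uint16_t first = next_seq_ctrl(&seq_no, 0, 1);		/* 0x0010 */
	uint16_t frag1 = next_seq_ctrl(&seq_no, 0x0001, 0);	/* 0x0011 */

	printf("0x%04x 0x%04x\n", first, frag1);
	return 0;
}
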
Sujithe8324352009-01-16 21:38:42 +05301897/*****************/
1898/* TX Completion */
1899/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001900
Sujithe8324352009-01-16 21:38:42 +05301901static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau827e69b2009-11-15 23:09:25 +01001902 struct ath_wiphy *aphy, int tx_flags)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001903{
Sujithe8324352009-01-16 21:38:42 +05301904 struct ieee80211_hw *hw = sc->hw;
1905 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001906 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001907 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001908 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301909
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001910 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301911
Felix Fietkau827e69b2009-11-15 23:09:25 +01001912 if (aphy)
1913 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301914
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301915 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301916 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301917
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301918 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301919 /* Frame was ACKed */
1920 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1921 }
1922
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001923 padpos = ath9k_cmn_padpos(hdr->frame_control);
1924 padsize = padpos & 3;
1925	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301926 /*
1927 * Remove MAC header padding before giving the frame back to
1928 * mac80211.
1929 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001930 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301931 skb_pull(skb, padsize);
1932 }
1933
Sujith1b04b932010-01-08 10:36:05 +05301934 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1935 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001936 ath_print(common, ATH_DBG_PS,
1937 "Going back to sleep after having "
Pavel Roskinf643e512010-01-29 17:22:12 -05001938 "received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301939 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1940 PS_WAIT_FOR_CAB |
1941 PS_WAIT_FOR_PSPOLL_DATA |
1942 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001943 }
1944
Felix Fietkau827e69b2009-11-15 23:09:25 +01001945 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
Jouni Malinenf0ed85c2009-03-03 19:23:31 +02001946 ath9k_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001947 else {
1948 q = skb_get_queue_mapping(skb);
1949 if (q >= 4)
1950 q = 0;
1951
1952 if (--sc->tx.pending_frames[q] < 0)
1953 sc->tx.pending_frames[q] = 0;
1954
Felix Fietkau827e69b2009-11-15 23:09:25 +01001955 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001956 }
Sujithe8324352009-01-16 21:38:42 +05301957}
1958
1959static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001960 struct ath_txq *txq, struct list_head *bf_q,
1961 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301962{
1963 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301964 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301965 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301966
Sujithe8324352009-01-16 21:38:42 +05301967 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301968 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301969
1970 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301971 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301972
1973 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301974 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301975 }
1976
1977 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001978
1979 if (bf->bf_state.bfs_paprd) {
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001980 if (time_after(jiffies,
1981 bf->bf_state.bfs_paprd_timestamp +
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001982 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001983 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001984 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001985 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001986 } else {
1987 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1988 ath_debug_stat_tx(sc, txq, bf, ts);
1989 }
Sujithe8324352009-01-16 21:38:42 +05301990
1991 /*
1992	 * Return this mpdu's list of ath_buf entries to the free queue
1993 */
1994 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1995 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1996 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1997}
1998
1999static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002000 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05302001{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002002 u16 seq_st = 0;
2003 u32 ba[WME_BA_BMP_SIZE >> 5];
Sujithe8324352009-01-16 21:38:42 +05302004 int ba_index;
2005 int nbad = 0;
2006 int isaggr = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002007
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07002008 if (bf->bf_lastbf->bf_tx_aborted)
Sujithe8324352009-01-16 21:38:42 +05302009 return 0;
Sujith528f0c62008-10-29 10:14:26 +05302010
Sujithcd3d39a2008-08-11 14:03:34 +05302011 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002012 if (isaggr) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002013 seq_st = ts->ts_seqnum;
2014 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002015 }
2016
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002017 while (bf) {
Sujithe8324352009-01-16 21:38:42 +05302018 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
2019 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
2020 nbad++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002021
Sujithe8324352009-01-16 21:38:42 +05302022 bf = bf->bf_next;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002023 }
2024
Sujithe8324352009-01-16 21:38:42 +05302025 return nbad;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002026}
2027
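/*
 * Editor's note: a standalone sketch (not driver code) of how a block-ack
 * bitmap is consulted when counting failed subframes, in the spirit of
 * ath_tx_num_badfrms() above. The helpers are simplified equivalents of
 * ATH_BA_INDEX()/ATH_BA_ISSET(), assuming a 64-entry BA window and a
 * 12-bit sequence space; they are not the driver's actual macros.
 */
#include <stdbool.h>
#include <stdint.h>

#define BA_WINDOW	64		/* block-ack window size */
#define SEQ_MODULUS	4096		/* 12-bit sequence space */

static unsigned int ba_index(uint16_t seq_start, uint16_t seqno)
{
	return (seqno - seq_start) & (SEQ_MODULUS - 1);
}

static bool ba_isset(const uint32_t *bitmap, unsigned int idx)
{
	if (idx >= BA_WINDOW)
		return false;
	return bitmap[idx >> 5] & (1u << (idx & 31));
}

/* For an aggregate: a subframe is bad if the burst failed outright or its
 * bit in the reported block-ack bitmap is clear. */
static int count_bad_subframes(const uint32_t *bitmap, uint16_t seq_start,
			       const uint16_t *seqnos, int nframes, bool txok)
{
	int i, nbad = 0;

	for (i = 0; i < nframes; i++)
		if (!txok || !ba_isset(bitmap, ba_index(seq_start, seqnos[i])))
			nbad++;

	return nbad;
}
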
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002028static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302029 int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302030{
Sujitha22be222009-03-30 15:28:36 +05302031 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302032 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302033 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01002034 struct ieee80211_hw *hw = bf->aphy->hw;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302035 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302036
Sujith95e4acb2009-03-13 08:56:09 +05302037 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002038 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302039
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002040 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302041 WARN_ON(tx_rateindex >= hw->max_rates);
2042
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002043 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302044 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Felix Fietkaud9698472010-03-01 13:32:11 +01002045 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
2046 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302047
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002048 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302049 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Sujith254ad0f2009-02-04 08:10:19 +05302050 if (ieee80211_is_data(hdr->frame_control)) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002051 if (ts->ts_flags &
Felix Fietkau827e69b2009-11-15 23:09:25 +01002052 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2053 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002054 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2055 (ts->ts_status & ATH9K_TXERR_FIFO))
Felix Fietkau827e69b2009-11-15 23:09:25 +01002056 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2057 tx_info->status.ampdu_len = bf->bf_nframes;
2058 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
Sujithc4288392008-11-18 09:09:30 +05302059 }
2060 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302061
Felix Fietkau545750d2009-11-23 22:21:01 +01002062 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302063 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002064 tx_info->status.rates[i].idx = -1;
2065 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302066
Felix Fietkau78c46532010-06-25 01:26:16 +02002067 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302068}
2069
Sujith059d8062009-01-16 21:38:49 +05302070static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2071{
2072 int qnum;
2073
Felix Fietkau97923b12010-06-12 00:33:55 -04002074 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2075 if (qnum == -1)
2076 return;
2077
Sujith059d8062009-01-16 21:38:49 +05302078 spin_lock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002079 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
2080 ath_mac80211_start_queue(sc, qnum);
2081 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05302082 }
2083 spin_unlock_bh(&txq->axq_lock);
2084}
2085
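/*
 * Editor's note: a standalone sketch (not driver code) of the per-queue
 * software flow control implemented by ath_tx_start() and
 * ath_wake_mac80211_queue(): a counter of accepted-but-uncompleted frames
 * stops the mac80211 queue above a high-water mark and wakes it once
 * completions bring it back below. MAX_QDEPTH is a stand-in threshold and
 * locking is omitted.
 */
#include <stdbool.h>

#define MAX_QDEPTH 128			/* stand-in for ATH_MAX_QDEPTH */

struct sw_queue {
	int pending_frames;
	bool stopped;
};

/* Called when a frame is handed to the driver; returns true if the
 * corresponding mac80211 queue should be stopped. */
static bool sw_queue_enqueue(struct sw_queue *q)
{
	if (++q->pending_frames > MAX_QDEPTH && !q->stopped)
		q->stopped = true;
	return q->stopped;
}

/* Called on tx completion; returns true if the queue may be woken. */
static bool sw_queue_complete(struct sw_queue *q)
{
	if (--q->pending_frames < 0)
		q->pending_frames = 0;

	if (q->stopped && q->pending_frames < MAX_QDEPTH) {
		q->stopped = false;
		return true;
	}
	return false;
}
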
Sujithc4288392008-11-18 09:09:30 +05302086static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002087{
Sujithcbe61d82009-02-09 13:27:12 +05302088 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002089 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002090 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2091 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302092 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002093 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302094 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002095 int status;
2096
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002097 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2098 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2099 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002100
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002101 for (;;) {
2102 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002103 if (list_empty(&txq->axq_q)) {
2104 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002105 spin_unlock_bh(&txq->axq_lock);
2106 break;
2107 }
2108 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2109
2110 /*
2111		 * There is a race condition where a BH gets scheduled
2112		 * after software writes TxE but before the hardware reloads
2113		 * the last descriptor to pick up the newly chained one.
2114 * Software must keep the last DONE descriptor as a
2115 * holding descriptor - software does so by marking
2116 * it with the STALE flag.
2117 */
2118 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302119 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002120 bf_held = bf;
2121 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302122 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002123 break;
2124 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002125 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302126 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002127 }
2128 }
2129
2130 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302131 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002132
Felix Fietkau29bffa92010-03-29 20:14:23 -07002133 memset(&ts, 0, sizeof(ts));
2134 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002135 if (status == -EINPROGRESS) {
2136 spin_unlock_bh(&txq->axq_lock);
2137 break;
2138 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002139
2140 /*
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002141 * We now know the nullfunc frame has been ACKed so we
2142 * can disable RX.
2143 */
2144 if (bf->bf_isnullfunc &&
Felix Fietkau29bffa92010-03-29 20:14:23 -07002145 (ts.ts_status & ATH9K_TX_ACKED)) {
Senthil Balasubramanian3f7c5c12010-02-03 22:51:13 +05302146 if ((sc->ps_flags & PS_ENABLED))
2147 ath9k_enable_ps(sc);
2148 else
Sujith1b04b932010-01-08 10:36:05 +05302149 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002150 }
2151
2152 /*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002153 * Remove ath_buf's of the same transmit unit from txq,
2154 * however leave the last descriptor back as the holding
2155 * descriptor for hw.
2156 */
Sujitha119cc42009-03-30 15:28:38 +05302157 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002158 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002159 if (!list_is_singular(&lastbf->list))
2160 list_cut_position(&bf_head,
2161 &txq->axq_q, lastbf->list.prev);
2162
2163 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002164 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002165 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002166 if (bf_held)
2167 list_del(&bf_held->list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002168 spin_unlock_bh(&txq->axq_lock);
2169
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002170 if (bf_held)
2171 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002172
Sujithcd3d39a2008-08-11 14:03:34 +05302173 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174 /*
2175 * This frame is sent out as a single frame.
2176 * Use hardware retry status for this frame.
2177 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002178 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302179 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002180 ath_tx_rc_status(bf, &ts, 0, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002181 }
Johannes Berge6a98542008-10-21 12:40:02 +02002182
Sujithcd3d39a2008-08-11 14:03:34 +05302183 if (bf_isampdu(bf))
Felix Fietkau29bffa92010-03-29 20:14:23 -07002184 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002185 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002186 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002187
Sujith059d8062009-01-16 21:38:49 +05302188 ath_wake_mac80211_queue(sc, txq);
2189
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002190 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302191 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002192 ath_txq_schedule(sc, txq);
2193 spin_unlock_bh(&txq->axq_lock);
2194 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002195}
2196
Sujith305fe472009-07-23 15:32:29 +05302197static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002198{
2199 struct ath_softc *sc = container_of(work, struct ath_softc,
2200 tx_complete_work.work);
2201 struct ath_txq *txq;
2202 int i;
2203 bool needreset = false;
2204
2205 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2206 if (ATH_TXQ_SETUP(sc, i)) {
2207 txq = &sc->tx.txq[i];
2208 spin_lock_bh(&txq->axq_lock);
2209 if (txq->axq_depth) {
2210 if (txq->axq_tx_inprogress) {
2211 needreset = true;
2212 spin_unlock_bh(&txq->axq_lock);
2213 break;
2214 } else {
2215 txq->axq_tx_inprogress = true;
2216 }
2217 }
2218 spin_unlock_bh(&txq->axq_lock);
2219 }
2220
2221 if (needreset) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002222 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2223 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302224 ath9k_ps_wakeup(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002225 ath_reset(sc, false);
Sujith332c5562009-10-09 09:51:28 +05302226 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002227 }
2228
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002229 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002230 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2231}
2232
2233
Sujithe8324352009-01-16 21:38:42 +05302234
2235void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002236{
Sujithe8324352009-01-16 21:38:42 +05302237 int i;
2238 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002239
Sujithe8324352009-01-16 21:38:42 +05302240 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002241
2242 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302243 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2244 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002245 }
2246}
2247
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002248void ath_tx_edma_tasklet(struct ath_softc *sc)
2249{
2250 struct ath_tx_status txs;
2251 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2252 struct ath_hw *ah = sc->sc_ah;
2253 struct ath_txq *txq;
2254 struct ath_buf *bf, *lastbf;
2255 struct list_head bf_head;
2256 int status;
2257 int txok;
2258
2259 for (;;) {
2260 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2261 if (status == -EINPROGRESS)
2262 break;
2263 if (status == -EIO) {
2264 ath_print(common, ATH_DBG_XMIT,
2265 "Error processing tx status\n");
2266 break;
2267 }
2268
2269 /* Skip beacon completions */
2270 if (txs.qid == sc->beacon.beaconq)
2271 continue;
2272
2273 txq = &sc->tx.txq[txs.qid];
2274
2275 spin_lock_bh(&txq->axq_lock);
2276 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2277 spin_unlock_bh(&txq->axq_lock);
2278 return;
2279 }
2280
2281 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2282 struct ath_buf, list);
2283 lastbf = bf->bf_lastbf;
2284
2285 INIT_LIST_HEAD(&bf_head);
2286 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2287 &lastbf->list);
2288 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2289 txq->axq_depth--;
2290 txq->axq_tx_inprogress = false;
2291 spin_unlock_bh(&txq->axq_lock);
2292
2293 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2294
Vasanthakumar Thiagarajande0f6482010-05-17 18:57:54 -07002295 /*
2296 * Make sure null func frame is acked before configuring
2297 * hw into ps mode.
2298 */
2299 if (bf->bf_isnullfunc && txok) {
2300 if ((sc->ps_flags & PS_ENABLED))
2301 ath9k_enable_ps(sc);
2302 else
2303 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2304 }
2305
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002306 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002307 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2308 bf->bf_state.bf_type |= BUF_XRETRY;
2309 ath_tx_rc_status(bf, &txs, 0, txok, true);
2310 }
2311
2312 if (bf_isampdu(bf))
2313 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2314 else
2315 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2316 &txs, txok, 0);
2317
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002318 ath_wake_mac80211_queue(sc, txq);
2319
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002320 spin_lock_bh(&txq->axq_lock);
2321 if (!list_empty(&txq->txq_fifo_pending)) {
2322 INIT_LIST_HEAD(&bf_head);
2323 bf = list_first_entry(&txq->txq_fifo_pending,
2324 struct ath_buf, list);
2325 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2326 &bf->bf_lastbf->list);
2327 ath_tx_txqaddbuf(sc, txq, &bf_head);
2328 } else if (sc->sc_flags & SC_OP_TXAGGR)
2329 ath_txq_schedule(sc, txq);
2330 spin_unlock_bh(&txq->axq_lock);
2331 }
2332}
2333
Sujithe8324352009-01-16 21:38:42 +05302334/*****************/
2335/* Init, Cleanup */
2336/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002337
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002338static int ath_txstatus_setup(struct ath_softc *sc, int size)
2339{
2340 struct ath_descdma *dd = &sc->txsdma;
2341 u8 txs_len = sc->sc_ah->caps.txs_len;
2342
2343 dd->dd_desc_len = size * txs_len;
2344 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2345 &dd->dd_desc_paddr, GFP_KERNEL);
2346 if (!dd->dd_desc)
2347 return -ENOMEM;
2348
2349 return 0;
2350}
2351
2352static int ath_tx_edma_init(struct ath_softc *sc)
2353{
2354 int err;
2355
2356 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2357 if (!err)
2358 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2359 sc->txsdma.dd_desc_paddr,
2360 ATH_TXSTATUS_RING_SIZE);
2361
2362 return err;
2363}
2364
2365static void ath_tx_edma_cleanup(struct ath_softc *sc)
2366{
2367 struct ath_descdma *dd = &sc->txsdma;
2368
2369 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2370 dd->dd_desc_paddr);
2371}
2372
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002373int ath_tx_init(struct ath_softc *sc, int nbufs)
2374{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002375 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376 int error = 0;
2377
Sujith797fe5cb2009-03-30 15:28:45 +05302378 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002379
Sujith797fe5cb2009-03-30 15:28:45 +05302380 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002381 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302382 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002383 ath_print(common, ATH_DBG_FATAL,
2384 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302385 goto err;
2386 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002387
Sujith797fe5cb2009-03-30 15:28:45 +05302388 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002389 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302390 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002391 ath_print(common, ATH_DBG_FATAL,
2392 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302393 goto err;
2394 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002395
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002396 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2397
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002398 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2399 error = ath_tx_edma_init(sc);
2400 if (error)
2401 goto err;
2402 }
2403
Sujith797fe5cb2009-03-30 15:28:45 +05302404err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002405 if (error != 0)
2406 ath_tx_cleanup(sc);
2407
2408 return error;
2409}
2410
Sujith797fe5cb2009-03-30 15:28:45 +05302411void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002412{
Sujithb77f4832008-12-07 21:44:03 +05302413 if (sc->beacon.bdma.dd_desc_len != 0)
2414 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002415
Sujithb77f4832008-12-07 21:44:03 +05302416 if (sc->tx.txdma.dd_desc_len != 0)
2417 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002418
2419 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2420 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002421}
2422
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002423void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2424{
Sujithc5170162008-10-29 10:13:59 +05302425 struct ath_atx_tid *tid;
2426 struct ath_atx_ac *ac;
2427 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002428
Sujith8ee5afb2008-12-07 21:43:36 +05302429 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302430 tidno < WME_NUM_TID;
2431 tidno++, tid++) {
2432 tid->an = an;
2433 tid->tidno = tidno;
2434 tid->seq_start = tid->seq_next = 0;
2435 tid->baw_size = WME_MAX_BA;
2436 tid->baw_head = tid->baw_tail = 0;
2437 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302438 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302439 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302440 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302441 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302442 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302443 tid->state &= ~AGGR_ADDBA_COMPLETE;
2444 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302445 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002446
Sujith8ee5afb2008-12-07 21:43:36 +05302447 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302448 acno < WME_NUM_AC; acno++, ac++) {
2449 ac->sched = false;
Felix Fietkau1d2231e2010-06-12 00:33:51 -04002450 ac->qnum = sc->tx.hwq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302451 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002452 }
2453}
2454
Sujithb5aa9bf2008-10-29 10:13:31 +05302455void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002456{
Felix Fietkau2b409942010-07-07 19:42:08 +02002457 struct ath_atx_ac *ac;
2458 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002459 struct ath_txq *txq;
Felix Fietkau2b409942010-07-07 19:42:08 +02002460 int i, tidno;
Sujithe8324352009-01-16 21:38:42 +05302461
Felix Fietkau2b409942010-07-07 19:42:08 +02002462 for (tidno = 0, tid = &an->tid[tidno];
2463 tidno < WME_NUM_TID; tidno++, tid++) {
2464 i = tid->ac->qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002465
Felix Fietkau2b409942010-07-07 19:42:08 +02002466 if (!ATH_TXQ_SETUP(sc, i))
2467 continue;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002468
Felix Fietkau2b409942010-07-07 19:42:08 +02002469 txq = &sc->tx.txq[i];
2470 ac = tid->ac;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002471
Felix Fietkau2b409942010-07-07 19:42:08 +02002472 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002473
Felix Fietkau2b409942010-07-07 19:42:08 +02002474 if (tid->sched) {
2475 list_del(&tid->list);
2476 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002477 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002478
2479 if (ac->sched) {
2480 list_del(&ac->list);
2481 tid->ac->sched = false;
2482 }
2483
2484 ath_tid_drain(sc, txq, tid);
2485 tid->state &= ~AGGR_ADDBA_COMPLETE;
2486 tid->state &= ~AGGR_CLEANUP;
2487
2488 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002489 }
2490}