/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
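/*
 * Example of the symbol-time math above: with the long guard interval an
 * OFDM symbol lasts 4 us, so SYMBOL_TIME(25) = 100 us; with the short guard
 * interval a symbol lasts 3.6 us, so SYMBOL_TIME_HALFGI(25) =
 * (25 * 18 + 4) / 5 = 90 us. The NUM_SYMBOLS_PER_USEC* macros are the
 * corresponding (rounded) inverses.
 */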

#define OFDM_SIFS_TIME              16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};
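
/*
 * bits_per_symbol[] is indexed by the single-stream MCS (rix % 8) and by
 * channel width (0: 20 MHz, 1: 40 MHz); callers multiply the result by
 * HT_RC_2_STREAMS(rix) to account for multiple spatial streams.
 */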

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};
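
/*
 * ath_max_4ms_framelen[][] holds, per guard-interval/width mode and MCS
 * index (0-31), roughly how many bytes fit into a 4 ms transmit duration
 * at that rate; values are clamped below 65536 since the hardware limits
 * aggregates to 16-bit lengths. ath_lookup_rate() uses it to bound the
 * A-MPDU size for the chosen rate series.
 */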
98
Sujithe8324352009-01-16 21:38:42 +053099/*********************/
100/* Aggregation logic */
101/*********************/
102
Sujithe8324352009-01-16 21:38:42 +0530103static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
104{
105 struct ath_atx_ac *ac = tid->ac;
106
107 if (tid->paused)
108 return;
109
110 if (tid->sched)
111 return;
112
113 tid->sched = true;
114 list_add_tail(&tid->list, &ac->tid_q);
115
116 if (ac->sched)
117 return;
118
119 ac->sched = true;
120 list_add_tail(&ac->list, &txq->axq_acq);
121}
122
Sujithe8324352009-01-16 21:38:42 +0530123static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
124{
125 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
126
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200127 WARN_ON(!tid->paused);
128
Sujithe8324352009-01-16 21:38:42 +0530129 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200130 tid->paused = false;
Sujithe8324352009-01-16 21:38:42 +0530131
132 if (list_empty(&tid->buf_q))
133 goto unlock;
134
135 ath_tx_queue_tid(txq, tid);
136 ath_txq_schedule(sc, txq);
137unlock:
138 spin_unlock_bh(&txq->axq_lock);
139}
140
141static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
142{
143 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
144 struct ath_buf *bf;
145 struct list_head bf_head;
146 INIT_LIST_HEAD(&bf_head);
147
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200148 WARN_ON(!tid->paused);
149
Sujithe8324352009-01-16 21:38:42 +0530150 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200151 tid->paused = false;
Sujithe8324352009-01-16 21:38:42 +0530152
153 while (!list_empty(&tid->buf_q)) {
154 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -0700155 BUG_ON(bf_isretried(bf));
Sujithd43f30152009-01-16 21:38:53 +0530156 list_move_tail(&bf->list, &bf_head);
Sujithc37452b2009-03-09 09:31:57 +0530157 ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +0530158 }
159
160 spin_unlock_bh(&txq->axq_lock);
161}
162
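/*
 * The block-ack window is tracked in tid->tx_buf[], a circular array of
 * ATH_TID_MAX_BUFS slots indexed by a frame's offset from tid->seq_start.
 * ath_tx_addto_baw() claims a slot for an outgoing subframe, while
 * ath_tx_update_baw() releases it on completion and slides the window
 * start past any leading frames that have already completed.
 */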
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	BUG_ON(tid->tx_buf[cindex] != NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 0, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
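
/*
 * The limit returned above keeps an aggregate short enough to fit in
 * roughly 4 ms of airtime at the weakest rate in the series (scaled to
 * 3/8 of that when Bluetooth coexistence priority has been detected),
 * and is further capped by the peer's advertised maximum A-MPDU length.
 */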

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. first rate) to determine
	 * the required minimum length for a subframe. Take into account
	 * whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
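
/*
 * Each MPDU delimiter is 4 bytes on the air, which is why the caller
 * converts the delimiter count returned above into pad bytes with
 * (ndelim << 2) when accounting for the aggregate length.
 */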
661
662static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
Sujithfec247c2009-07-27 12:08:16 +0530663 struct ath_txq *txq,
Sujithd43f30152009-01-16 21:38:53 +0530664 struct ath_atx_tid *tid,
665 struct list_head *bf_q)
Sujithe8324352009-01-16 21:38:42 +0530666{
667#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
Sujithd43f30152009-01-16 21:38:53 +0530668 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
669 int rl = 0, nframes = 0, ndelim, prev_al = 0;
Sujithe8324352009-01-16 21:38:42 +0530670 u16 aggr_limit = 0, al = 0, bpad = 0,
671 al_delta, h_baw = tid->baw_size / 2;
672 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
Sujithe8324352009-01-16 21:38:42 +0530673
674 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
675
676 do {
677 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
678
Sujithd43f30152009-01-16 21:38:53 +0530679 /* do not step over block-ack window */
Sujithe8324352009-01-16 21:38:42 +0530680 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
681 status = ATH_AGGR_BAW_CLOSED;
682 break;
683 }
684
685 if (!rl) {
686 aggr_limit = ath_lookup_rate(sc, bf, tid);
687 rl = 1;
688 }
689
Sujithd43f30152009-01-16 21:38:53 +0530690 /* do not exceed aggregation limit */
Sujithe8324352009-01-16 21:38:42 +0530691 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
692
Sujithd43f30152009-01-16 21:38:53 +0530693 if (nframes &&
694 (aggr_limit < (al + bpad + al_delta + prev_al))) {
Sujithe8324352009-01-16 21:38:42 +0530695 status = ATH_AGGR_LIMITED;
696 break;
697 }
698
Sujithd43f30152009-01-16 21:38:53 +0530699 /* do not exceed subframe limit */
700 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
Sujithe8324352009-01-16 21:38:42 +0530701 status = ATH_AGGR_LIMITED;
702 break;
703 }
Sujithd43f30152009-01-16 21:38:53 +0530704 nframes++;
Sujithe8324352009-01-16 21:38:42 +0530705
Sujithd43f30152009-01-16 21:38:53 +0530706 /* add padding for previous frame to aggregation length */
Sujithe8324352009-01-16 21:38:42 +0530707 al += bpad + al_delta;
708
709 /*
710 * Get the delimiters needed to meet the MPDU
711 * density for this node.
712 */
713 ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
Sujithe8324352009-01-16 21:38:42 +0530714 bpad = PADBYTES(al_delta) + (ndelim << 2);
715
716 bf->bf_next = NULL;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -0400717 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
Sujithe8324352009-01-16 21:38:42 +0530718
Sujithd43f30152009-01-16 21:38:53 +0530719 /* link buffers of this frame to the aggregate */
Sujithe8324352009-01-16 21:38:42 +0530720 ath_tx_addto_baw(sc, tid, bf);
Sujithd43f30152009-01-16 21:38:53 +0530721 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
722 list_move_tail(&bf->list, bf_q);
Sujithe8324352009-01-16 21:38:42 +0530723 if (bf_prev) {
724 bf_prev->bf_next = bf;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -0400725 ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
726 bf->bf_daddr);
Sujithe8324352009-01-16 21:38:42 +0530727 }
728 bf_prev = bf;
Sujithfec247c2009-07-27 12:08:16 +0530729
Sujithe8324352009-01-16 21:38:42 +0530730 } while (!list_empty(&tid->buf_q));
731
732 bf_first->bf_al = al;
733 bf_first->bf_nframes = nframes;
Sujithd43f30152009-01-16 21:38:53 +0530734
Sujithe8324352009-01-16 21:38:42 +0530735 return status;
736#undef PADBYTES
737}
738
739static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
740 struct ath_atx_tid *tid)
741{
Sujithd43f30152009-01-16 21:38:53 +0530742 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +0530743 enum ATH_AGGR_STATUS status;
744 struct list_head bf_q;
Sujithe8324352009-01-16 21:38:42 +0530745
746 do {
747 if (list_empty(&tid->buf_q))
748 return;
749
750 INIT_LIST_HEAD(&bf_q);
751
Sujithfec247c2009-07-27 12:08:16 +0530752 status = ath_tx_form_aggr(sc, txq, tid, &bf_q);
Sujithe8324352009-01-16 21:38:42 +0530753
754 /*
Sujithd43f30152009-01-16 21:38:53 +0530755 * no frames picked up to be aggregated;
756 * block-ack window is not open.
Sujithe8324352009-01-16 21:38:42 +0530757 */
758 if (list_empty(&bf_q))
759 break;
760
761 bf = list_first_entry(&bf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +0530762 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +0530763
Sujithd43f30152009-01-16 21:38:53 +0530764 /* if only one frame, send as non-aggregate */
Sujithe8324352009-01-16 21:38:42 +0530765 if (bf->bf_nframes == 1) {
Sujithe8324352009-01-16 21:38:42 +0530766 bf->bf_state.bf_type &= ~BUF_AGGR;
Sujithd43f30152009-01-16 21:38:53 +0530767 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
Sujithe8324352009-01-16 21:38:42 +0530768 ath_buf_set_rate(sc, bf);
769 ath_tx_txqaddbuf(sc, txq, &bf_q);
770 continue;
771 }
772
Sujithd43f30152009-01-16 21:38:53 +0530773 /* setup first desc of aggregate */
Sujithe8324352009-01-16 21:38:42 +0530774 bf->bf_state.bf_type |= BUF_AGGR;
775 ath_buf_set_rate(sc, bf);
776 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
777
Sujithd43f30152009-01-16 21:38:53 +0530778 /* anchor last desc of aggregate */
779 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
Sujithe8324352009-01-16 21:38:42 +0530780
Sujithe8324352009-01-16 21:38:42 +0530781 ath_tx_txqaddbuf(sc, txq, &bf_q);
Sujithfec247c2009-07-27 12:08:16 +0530782 TX_STAT_INC(txq->axq_qnum, a_aggr);
Sujithe8324352009-01-16 21:38:42 +0530783
784 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
785 status != ATH_AGGR_BAW_CLOSED);
786}
787
Sujithf83da962009-07-23 15:32:37 +0530788void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
789 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +0530790{
791 struct ath_atx_tid *txtid;
792 struct ath_node *an;
793
794 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +0530795 txtid = ATH_AN_2_TID(an, tid);
796 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200797 txtid->paused = true;
Sujithf83da962009-07-23 15:32:37 +0530798 *ssn = txtid->seq_start;
Sujithe8324352009-01-16 21:38:42 +0530799}
800
Sujithf83da962009-07-23 15:32:37 +0530801void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
Sujithe8324352009-01-16 21:38:42 +0530802{
803 struct ath_node *an = (struct ath_node *)sta->drv_priv;
804 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
805 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700806 struct ath_tx_status ts;
Sujithe8324352009-01-16 21:38:42 +0530807 struct ath_buf *bf;
808 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700809
810 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +0530811 INIT_LIST_HEAD(&bf_head);
812
813 if (txtid->state & AGGR_CLEANUP)
Sujithf83da962009-07-23 15:32:37 +0530814 return;
Sujithe8324352009-01-16 21:38:42 +0530815
816 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
Vasanthakumar Thiagarajan5eae6592009-06-09 15:28:21 +0530817 txtid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithf83da962009-07-23 15:32:37 +0530818 return;
Sujithe8324352009-01-16 21:38:42 +0530819 }
820
Sujithe8324352009-01-16 21:38:42 +0530821 /* drop all software retried frames and mark this TID */
822 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200823 txtid->paused = true;
Sujithe8324352009-01-16 21:38:42 +0530824 while (!list_empty(&txtid->buf_q)) {
825 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
826 if (!bf_isretried(bf)) {
827 /*
828 * NB: it's based on the assumption that
829 * software retried frame will always stay
830 * at the head of software queue.
831 */
832 break;
833 }
Sujithd43f30152009-01-16 21:38:53 +0530834 list_move_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +0530835 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700836 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +0530837 }
Sujithd43f30152009-01-16 21:38:53 +0530838 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +0530839
840 if (txtid->baw_head != txtid->baw_tail) {
Sujithe8324352009-01-16 21:38:42 +0530841 txtid->state |= AGGR_CLEANUP;
842 } else {
843 txtid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithe8324352009-01-16 21:38:42 +0530844 ath_tx_flush_tid(sc, txtid);
845 }
Sujithe8324352009-01-16 21:38:42 +0530846}
847
848void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
849{
850 struct ath_atx_tid *txtid;
851 struct ath_node *an;
852
853 an = (struct ath_node *)sta->drv_priv;
854
855 if (sc->sc_flags & SC_OP_TXAGGR) {
856 txtid = ATH_AN_2_TID(an, tid);
857 txtid->baw_size =
858 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
859 txtid->state |= AGGR_ADDBA_COMPLETE;
860 txtid->state &= ~AGGR_ADDBA_PROGRESS;
861 ath_tx_resume_tid(sc, txtid);
862 }
863}
864
865bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
866{
867 struct ath_atx_tid *txtid;
868
869 if (!(sc->sc_flags & SC_OP_TXAGGR))
870 return false;
871
872 txtid = ATH_AN_2_TID(an, tidno);
873
Vasanthakumar Thiagarajanc3d8f022009-06-10 17:50:08 +0530874 if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
Sujithe8324352009-01-16 21:38:42 +0530875 return true;
Sujithe8324352009-01-16 21:38:42 +0530876 return false;
877}
878
879/********************/
880/* Queue Management */
881/********************/
882
Sujithe8324352009-01-16 21:38:42 +0530883static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
884 struct ath_txq *txq)
885{
886 struct ath_atx_ac *ac, *ac_tmp;
887 struct ath_atx_tid *tid, *tid_tmp;
888
889 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
890 list_del(&ac->list);
891 ac->sched = false;
892 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
893 list_del(&tid->list);
894 tid->sched = false;
895 ath_tid_drain(sc, txq, tid);
896 }
897 }
898}
899
900struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
901{
Sujithcbe61d82009-02-09 13:27:12 +0530902 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -0700903 struct ath_common *common = ath9k_hw_common(ah);
Sujithe8324352009-01-16 21:38:42 +0530904 struct ath9k_tx_queue_info qi;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400905 int qnum, i;
Sujithe8324352009-01-16 21:38:42 +0530906
907 memset(&qi, 0, sizeof(qi));
908 qi.tqi_subtype = subtype;
909 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
910 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
911 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
912 qi.tqi_physCompBuf = 0;
913
914 /*
915 * Enable interrupts only for EOL and DESC conditions.
916 * We mark tx descriptors to receive a DESC interrupt
917 * when a tx queue gets deep; otherwise waiting for the
918 * EOL to reap descriptors. Note that this is done to
919 * reduce interrupt load and this only defers reaping
920 * descriptors, never transmitting frames. Aside from
921 * reducing interrupts this also permits more concurrency.
922 * The only potential downside is if the tx queue backs
923 * up in which case the top half of the kernel may backup
924 * due to a lack of tx descriptors.
925 *
926 * The UAPSD queue is an exception, since we take a desc-
927 * based intr on the EOSP frames.
928 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -0400929 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
930 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
931 TXQ_FLAG_TXERRINT_ENABLE;
932 } else {
933 if (qtype == ATH9K_TX_QUEUE_UAPSD)
934 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
935 else
936 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
937 TXQ_FLAG_TXDESCINT_ENABLE;
938 }
Sujithe8324352009-01-16 21:38:42 +0530939 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
940 if (qnum == -1) {
941 /*
942 * NB: don't print a message, this happens
943 * normally on parts with too few tx queues
944 */
945 return NULL;
946 }
947 if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -0700948 ath_print(common, ATH_DBG_FATAL,
949 "qnum %u out of range, max %u!\n",
950 qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
Sujithe8324352009-01-16 21:38:42 +0530951 ath9k_hw_releasetxqueue(ah, qnum);
952 return NULL;
953 }
954 if (!ATH_TXQ_SETUP(sc, qnum)) {
955 struct ath_txq *txq = &sc->tx.txq[qnum];
956
Felix Fietkau293f2ba2010-06-12 00:33:49 -0400957 txq->axq_class = subtype;
Sujithe8324352009-01-16 21:38:42 +0530958 txq->axq_qnum = qnum;
959 txq->axq_link = NULL;
960 INIT_LIST_HEAD(&txq->axq_q);
961 INIT_LIST_HEAD(&txq->axq_acq);
962 spin_lock_init(&txq->axq_lock);
963 txq->axq_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -0400964 txq->axq_tx_inprogress = false;
Sujithe8324352009-01-16 21:38:42 +0530965 sc->tx.txqsetup |= 1<<qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400966
967 txq->txq_headidx = txq->txq_tailidx = 0;
968 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
969 INIT_LIST_HEAD(&txq->txq_fifo[i]);
970 INIT_LIST_HEAD(&txq->txq_fifo_pending);
Sujithe8324352009-01-16 21:38:42 +0530971 }
972 return &sc->tx.txq[qnum];
973}
974
Sujithe8324352009-01-16 21:38:42 +0530975int ath_txq_update(struct ath_softc *sc, int qnum,
976 struct ath9k_tx_queue_info *qinfo)
977{
Sujithcbe61d82009-02-09 13:27:12 +0530978 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +0530979 int error = 0;
980 struct ath9k_tx_queue_info qi;
981
982 if (qnum == sc->beacon.beaconq) {
983 /*
984 * XXX: for beacon queue, we just save the parameter.
985 * It will be picked up by ath_beaconq_config when
986 * it's necessary.
987 */
988 sc->beacon.beacon_qi = *qinfo;
989 return 0;
990 }
991
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -0700992 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +0530993
994 ath9k_hw_get_txq_props(ah, qnum, &qi);
995 qi.tqi_aifs = qinfo->tqi_aifs;
996 qi.tqi_cwmin = qinfo->tqi_cwmin;
997 qi.tqi_cwmax = qinfo->tqi_cwmax;
998 qi.tqi_burstTime = qinfo->tqi_burstTime;
999 qi.tqi_readyTime = qinfo->tqi_readyTime;
1000
1001 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001002 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1003 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301004 error = -EIO;
1005 } else {
1006 ath9k_hw_resettxqueue(ah, qnum);
1007 }
1008
1009 return error;
1010}
1011
1012int ath_cabq_update(struct ath_softc *sc)
1013{
1014 struct ath9k_tx_queue_info qi;
1015 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301016
1017 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1018 /*
1019 * Ensure the readytime % is within the bounds.
1020 */
Sujith17d79042009-02-09 13:27:03 +05301021 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1022 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1023 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1024 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301025
Johannes Berg57c4d7b2009-04-23 16:10:04 +02001026 qi.tqi_readyTime = (sc->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301027 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301028 ath_txq_update(sc, qnum, &qi);
1029
1030 return 0;
1031}
1032
Sujith043a0402009-01-16 21:38:47 +05301033/*
1034 * Drain a given TX queue (could be Beacon or Data)
1035 *
1036 * This assumes output has been stopped and
1037 * we do not need to block ath_tx_tasklet.
1038 */
1039void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301040{
1041 struct ath_buf *bf, *lastbf;
1042 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001043 struct ath_tx_status ts;
1044
1045 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301046 INIT_LIST_HEAD(&bf_head);
1047
Sujithe8324352009-01-16 21:38:42 +05301048 for (;;) {
1049 spin_lock_bh(&txq->axq_lock);
1050
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001051 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1052 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1053 txq->txq_headidx = txq->txq_tailidx = 0;
1054 spin_unlock_bh(&txq->axq_lock);
1055 break;
1056 } else {
1057 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1058 struct ath_buf, list);
1059 }
1060 } else {
1061 if (list_empty(&txq->axq_q)) {
1062 txq->axq_link = NULL;
1063 spin_unlock_bh(&txq->axq_lock);
1064 break;
1065 }
1066 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1067 list);
Sujithe8324352009-01-16 21:38:42 +05301068
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001069 if (bf->bf_stale) {
1070 list_del(&bf->list);
1071 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301072
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001073 ath_tx_return_buffer(sc, bf);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001074 continue;
1075 }
Sujithe8324352009-01-16 21:38:42 +05301076 }
1077
1078 lastbf = bf->bf_lastbf;
Vasanthakumar Thiagarajan6d913f72010-04-15 17:38:46 -04001079 if (!retry_tx)
1080 lastbf->bf_tx_aborted = true;
Sujithe8324352009-01-16 21:38:42 +05301081
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001082 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1083 list_cut_position(&bf_head,
1084 &txq->txq_fifo[txq->txq_tailidx],
1085 &lastbf->list);
1086 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1087 } else {
1088 /* remove ath_buf's of the same mpdu from txq */
1089 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1090 }
1091
Sujithe8324352009-01-16 21:38:42 +05301092 txq->axq_depth--;
1093
1094 spin_unlock_bh(&txq->axq_lock);
1095
1096 if (bf_isampdu(bf))
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001097 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
Sujithe8324352009-01-16 21:38:42 +05301098 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001099 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +05301100 }
1101
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001102 spin_lock_bh(&txq->axq_lock);
1103 txq->axq_tx_inprogress = false;
1104 spin_unlock_bh(&txq->axq_lock);
1105
Sujithe8324352009-01-16 21:38:42 +05301106 /* flush any pending frames if aggregation is enabled */
1107 if (sc->sc_flags & SC_OP_TXAGGR) {
1108 if (!retry_tx) {
1109 spin_lock_bh(&txq->axq_lock);
1110 ath_txq_drain_pending_buffers(sc, txq);
1111 spin_unlock_bh(&txq->axq_lock);
1112 }
1113 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001114
1115 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1116 spin_lock_bh(&txq->axq_lock);
1117 while (!list_empty(&txq->txq_fifo_pending)) {
1118 bf = list_first_entry(&txq->txq_fifo_pending,
1119 struct ath_buf, list);
1120 list_cut_position(&bf_head,
1121 &txq->txq_fifo_pending,
1122 &bf->bf_lastbf->list);
1123 spin_unlock_bh(&txq->axq_lock);
1124
1125 if (bf_isampdu(bf))
1126 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
1127 &ts, 0);
1128 else
1129 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1130 &ts, 0, 0);
1131 spin_lock_bh(&txq->axq_lock);
1132 }
1133 spin_unlock_bh(&txq->axq_lock);
1134 }
Sujithe8324352009-01-16 21:38:42 +05301135}
1136
Sujith043a0402009-01-16 21:38:47 +05301137void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1138{
Sujithcbe61d82009-02-09 13:27:12 +05301139 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001140 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301141 struct ath_txq *txq;
1142 int i, npend = 0;
1143
1144 if (sc->sc_flags & SC_OP_INVALID)
1145 return;
1146
1147 /* Stop beacon queue */
1148 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1149
1150 /* Stop data queues */
1151 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1152 if (ATH_TXQ_SETUP(sc, i)) {
1153 txq = &sc->tx.txq[i];
1154 ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1155 npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
1156 }
1157 }
1158
1159 if (npend) {
1160 int r;
1161
Sujithe8009e92009-12-14 14:57:08 +05301162 ath_print(common, ATH_DBG_FATAL,
Justin P. Mattock9be8ab22010-05-26 11:00:04 -07001163 "Failed to stop TX DMA. Resetting hardware!\n");
Sujith043a0402009-01-16 21:38:47 +05301164
1165 spin_lock_bh(&sc->sc_resetlock);
Felix Fietkau20bd2a02010-07-31 00:12:00 +02001166 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
Sujith043a0402009-01-16 21:38:47 +05301167 if (r)
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001168 ath_print(common, ATH_DBG_FATAL,
1169 "Unable to reset hardware; reset status %d\n",
1170 r);
Sujith043a0402009-01-16 21:38:47 +05301171 spin_unlock_bh(&sc->sc_resetlock);
1172 }
1173
1174 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1175 if (ATH_TXQ_SETUP(sc, i))
1176 ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
1177 }
1178}
1179
Sujithe8324352009-01-16 21:38:42 +05301180void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1181{
1182 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1183 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1184}
1185
Sujithe8324352009-01-16 21:38:42 +05301186void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1187{
1188 struct ath_atx_ac *ac;
1189 struct ath_atx_tid *tid;
1190
1191 if (list_empty(&txq->axq_acq))
1192 return;
1193
1194 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1195 list_del(&ac->list);
1196 ac->sched = false;
1197
1198 do {
1199 if (list_empty(&ac->tid_q))
1200 return;
1201
1202 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
1203 list_del(&tid->list);
1204 tid->sched = false;
1205
1206 if (tid->paused)
1207 continue;
1208
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001209 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301210
1211 /*
1212 * add tid to round-robin queue if more frames
1213 * are pending for the tid
1214 */
1215 if (!list_empty(&tid->buf_q))
1216 ath_tx_queue_tid(txq, tid);
1217
1218 break;
1219 } while (!list_empty(&ac->tid_q));
1220
1221 if (!list_empty(&ac->tid_q)) {
1222 if (!ac->sched) {
1223 ac->sched = true;
1224 list_add_tail(&ac->list, &txq->axq_acq);
1225 }
1226 }
1227}
1228
1229int ath_tx_setup(struct ath_softc *sc, int haltype)
1230{
1231 struct ath_txq *txq;
1232
1233 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001234 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1235 "HAL AC %u out of range, max %zu!\n",
Sujithe8324352009-01-16 21:38:42 +05301236 haltype, ARRAY_SIZE(sc->tx.hwq_map));
1237 return 0;
1238 }
1239 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1240 if (txq != NULL) {
1241 sc->tx.hwq_map[haltype] = txq->axq_qnum;
1242 return 1;
1243 } else
1244 return 0;
1245}
1246
1247/***********/
1248/* TX, DMA */
1249/***********/
1250
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001251/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001252 * Insert a chain of ath_buf (descriptors) on a txq and
1253 * assume the descriptors are already chained together by caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001254 */
Sujith102e0572008-10-29 10:15:16 +05301255static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1256 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001257{
Sujithcbe61d82009-02-09 13:27:12 +05301258 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001259 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001260 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +05301261
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001262 /*
1263 * Insert the frame on the outbound list and
1264 * pass it on to the hardware.
1265 */
1266
1267 if (list_empty(head))
1268 return;
1269
1270 bf = list_first_entry(head, struct ath_buf, list);
1271
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001272 ath_print(common, ATH_DBG_QUEUE,
1273 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001274
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001275 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1276 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1277 list_splice_tail_init(head, &txq->txq_fifo_pending);
1278 return;
1279 }
1280 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
1281 ath_print(common, ATH_DBG_XMIT,
1282 "Initializing tx fifo %d which "
1283 "is non-empty\n",
1284 txq->txq_headidx);
1285 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1286 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1287 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001288 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001289 ath_print(common, ATH_DBG_XMIT,
1290 "TXDP[%u] = %llx (%p)\n",
1291 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001292 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001293 list_splice_tail_init(head, &txq->axq_q);
1294
1295 if (txq->axq_link == NULL) {
1296 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1297 ath_print(common, ATH_DBG_XMIT,
1298 "TXDP[%u] = %llx (%p)\n",
1299 txq->axq_qnum, ito64(bf->bf_daddr),
1300 bf->bf_desc);
1301 } else {
1302 *txq->axq_link = bf->bf_daddr;
1303 ath_print(common, ATH_DBG_XMIT,
1304 "link[%u] (%p)=%llx (%p)\n",
1305 txq->axq_qnum, txq->axq_link,
1306 ito64(bf->bf_daddr), bf->bf_desc);
1307 }
1308 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1309 &txq->axq_link);
1310 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001311 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001312 txq->axq_depth++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001313}
1314
Sujithe8324352009-01-16 21:38:42 +05301315static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1316 struct list_head *bf_head,
1317 struct ath_tx_control *txctl)
1318{
1319 struct ath_buf *bf;
1320
Sujithe8324352009-01-16 21:38:42 +05301321 bf = list_first_entry(bf_head, struct ath_buf, list);
1322 bf->bf_state.bf_type |= BUF_AMPDU;
Sujithfec247c2009-07-27 12:08:16 +05301323 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
Sujithe8324352009-01-16 21:38:42 +05301324
1325 /*
1326 * Do not queue to h/w when any of the following conditions is true:
1327 * - there are pending frames in software queue
1328 * - the TID is currently paused for ADDBA/BAR request
1329 * - seqno is not within block-ack window
1330 * - h/w queue depth exceeds low water mark
1331 */
1332 if (!list_empty(&tid->buf_q) || tid->paused ||
1333 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1334 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001335 /*
Sujithe8324352009-01-16 21:38:42 +05301336 * Add this frame to software queue for scheduling later
1337 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001338 */
Sujithd43f30152009-01-16 21:38:53 +05301339 list_move_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301340 ath_tx_queue_tid(txctl->txq, tid);
1341 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001342 }
1343
Sujithe8324352009-01-16 21:38:42 +05301344 /* Add sub-frame to BAW */
1345 ath_tx_addto_baw(sc, tid, bf);
1346
1347 /* Queue to h/w without aggregation */
1348 bf->bf_nframes = 1;
Sujithd43f30152009-01-16 21:38:53 +05301349 bf->bf_lastbf = bf;
Sujithe8324352009-01-16 21:38:42 +05301350 ath_buf_set_rate(sc, bf);
1351 ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
Sujithc4288392008-11-18 09:09:30 +05301352}
1353
Sujithc37452b2009-03-09 09:31:57 +05301354static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
1355 struct ath_atx_tid *tid,
1356 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001357{
Sujithe8324352009-01-16 21:38:42 +05301358 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001359
Sujithe8324352009-01-16 21:38:42 +05301360 bf = list_first_entry(bf_head, struct ath_buf, list);
1361 bf->bf_state.bf_type &= ~BUF_AMPDU;
1362
1363 /* update starting sequence number for subsequent ADDBA request */
1364 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1365
1366 bf->bf_nframes = 1;
Sujithd43f30152009-01-16 21:38:53 +05301367 bf->bf_lastbf = bf;
Sujithe8324352009-01-16 21:38:42 +05301368 ath_buf_set_rate(sc, bf);
1369 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301370 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001371}
1372
Sujithc37452b2009-03-09 09:31:57 +05301373static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1374 struct list_head *bf_head)
1375{
1376 struct ath_buf *bf;
1377
1378 bf = list_first_entry(bf_head, struct ath_buf, list);
1379
1380 bf->bf_lastbf = bf;
1381 bf->bf_nframes = 1;
1382 ath_buf_set_rate(sc, bf);
1383 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301384 TX_STAT_INC(txq->axq_qnum, queued);
Sujithc37452b2009-03-09 09:31:57 +05301385}
1386
Sujith528f0c62008-10-29 10:14:26 +05301387static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001388{
Sujith528f0c62008-10-29 10:14:26 +05301389 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001390 enum ath9k_pkt_type htype;
1391 __le16 fc;
1392
Sujith528f0c62008-10-29 10:14:26 +05301393 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001394 fc = hdr->frame_control;
1395
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001396 if (ieee80211_is_beacon(fc))
1397 htype = ATH9K_PKT_TYPE_BEACON;
1398 else if (ieee80211_is_probe_resp(fc))
1399 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1400 else if (ieee80211_is_atim(fc))
1401 htype = ATH9K_PKT_TYPE_ATIM;
1402 else if (ieee80211_is_pspoll(fc))
1403 htype = ATH9K_PKT_TYPE_PSPOLL;
1404 else
1405 htype = ATH9K_PKT_TYPE_NORMAL;
1406
1407 return htype;
1408}
1409
Sujith528f0c62008-10-29 10:14:26 +05301410static void assign_aggr_tid_seqno(struct sk_buff *skb,
1411 struct ath_buf *bf)
1412{
1413 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1414 struct ieee80211_hdr *hdr;
1415 struct ath_node *an;
1416 struct ath_atx_tid *tid;
1417 __le16 fc;
1418 u8 *qc;
1419
1420 if (!tx_info->control.sta)
1421 return;
1422
1423 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1424 hdr = (struct ieee80211_hdr *)skb->data;
1425 fc = hdr->frame_control;
1426
Sujith528f0c62008-10-29 10:14:26 +05301427 if (ieee80211_is_data_qos(fc)) {
1428 qc = ieee80211_get_qos_ctl(hdr);
1429 bf->bf_tidno = qc[0] & 0xf;
Sujith98deeea2008-08-11 14:05:46 +05301430 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001431
Sujithe8324352009-01-16 21:38:42 +05301432 /*
	1433	 * For HT-capable stations, we save the tidno for later use.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301434	 * We also override the seqno set by the upper layer with the
	1435	 * one kept in the tx aggregation state.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301436 */
1437 tid = ATH_AN_2_TID(an, bf->bf_tidno);
Sujith17b182e2009-12-14 14:56:56 +05301438 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301439 bf->bf_seqno = tid->seq_next;
1440 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
Sujith528f0c62008-10-29 10:14:26 +05301441}
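/*
 * Illustration of the sequence number handling above (made-up value): if
 * tid->seq_next is 0x02a, the frame leaves with seq_ctrl = 0x02a <<
 * IEEE80211_SEQ_SEQ_SHIFT (fragment number 0), bf_seqno records 0x02a for
 * later block-ack accounting, and seq_next advances to 0x02b, wrapping at
 * IEEE80211_SEQ_MAX.
 */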
1442
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001443static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
Sujith528f0c62008-10-29 10:14:26 +05301444{
1445 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1446 int flags = 0;
1447
1448 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1449 flags |= ATH9K_TXDESC_INTREQ;
1450
1451 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1452 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301453
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001454 if (use_ldpc)
1455 flags |= ATH9K_TXDESC_LDPC;
1456
Sujith528f0c62008-10-29 10:14:26 +05301457 return flags;
1458}
1459
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001460/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001461 * rix - rate index
1462 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1463 * width - 0 for 20 MHz, 1 for 40 MHz
	1464	 * half_gi - use 3.6 us (short GI) instead of 4 us symbol time
1465 */
Sujith102e0572008-10-29 10:15:16 +05301466static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1467 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001468{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001469 u32 nbits, nsymbits, duration, nsymbols;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001470 int streams, pktlen;
1471
Sujithcd3d39a2008-08-11 14:03:34 +05301472 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +05301473
1474 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001475 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001476 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001477 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001478 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1479
1480 if (!half_gi)
1481 duration = SYMBOL_TIME(nsymbols);
1482 else
1483 duration = SYMBOL_TIME_HALFGI(nsymbols);
1484
Sujithe63835b2008-11-18 09:07:53 +05301485	/* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001486 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301487
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001488 return duration;
1489}
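/*
 * Worked example of the computation above (illustrative numbers): for a
 * 1500-byte single-stream MPDU at MCS 5 (rix = 5), HT20, full GI:
 *   nbits    = 1500 * 8 + OFDM_PLCP_BITS = 12022
 *   nsymbits = bits_per_symbol[5][0] * 1 = 208
 *   nsymbols = ceil(12022 / 208)         = 58
 *   duration = SYMBOL_TIME(58)           = 232 us
 * plus L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1) = 36 us of
 * training/signal fields, giving 268 us on air.
 */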
1490
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001491static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1492{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001493 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001494 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301495 struct sk_buff *skb;
1496 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301497 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001498 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301499 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301500 int i, flags = 0;
1501 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301502 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301503
1504 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301505
Sujitha22be222009-03-30 15:28:36 +05301506 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301507 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301508 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301509 hdr = (struct ieee80211_hdr *)skb->data;
1510 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301511
Sujithc89424d2009-01-30 14:29:28 +05301512 /*
	1513	 * Whether Short Preamble is needed for the CTS rate is taken
	1514	 * from the BSS's global flag, while the rate series use
	1515	 * IEEE80211_TX_RC_USE_SHORT_PREAMBLE instead.
1516 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001517 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1518 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301519 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001520 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001521
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001522 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001523 bool is_40, is_sgi, is_sp;
1524 int phy;
1525
Sujithe63835b2008-11-18 09:07:53 +05301526 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001527 continue;
1528
Sujitha8efee42008-11-18 09:07:30 +05301529 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301530 series[i].Tries = rates[i].count;
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001531 series[i].ChSel = common->tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001532
Felix Fietkau27032052010-01-17 21:08:50 +01001533 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1534 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301535 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001536 flags |= ATH9K_TXDESC_RTSENA;
1537 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1538 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1539 flags |= ATH9K_TXDESC_CTSENA;
1540 }
1541
Sujithc89424d2009-01-30 14:29:28 +05301542 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1543 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1544 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1545 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001546
Felix Fietkau545750d2009-11-23 22:21:01 +01001547 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1548 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1549 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1550
1551 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1552 /* MCS rates */
1553 series[i].Rate = rix | 0x80;
1554 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1555 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001556 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1557 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001558 continue;
1559 }
1560
	1561		/* legacy rates */
	1562		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
	1563		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
	1564		    !(rate->flags & IEEE80211_RATE_ERP_G))
	1565			phy = WLAN_RC_PHY_CCK;
	1566		else
	1567			phy = WLAN_RC_PHY_OFDM;
	1568	
1569 series[i].Rate = rate->hw_value;
1570 if (rate->hw_value_short) {
1571 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1572 series[i].Rate |= rate->hw_value_short;
1573 } else {
1574 is_sp = false;
1575 }
1576
1577 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1578 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001579 }
1580
Felix Fietkau27032052010-01-17 21:08:50 +01001581 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1582 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1583 flags &= ~ATH9K_TXDESC_RTSENA;
1584
1585 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1586 if (flags & ATH9K_TXDESC_RTSENA)
1587 flags &= ~ATH9K_TXDESC_CTSENA;
1588
Sujithe63835b2008-11-18 09:07:53 +05301589 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301590 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1591 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301592 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301593 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301594
Sujith17d79042009-02-09 13:27:03 +05301595 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301596 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001597}
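/*
 * Example of how a mac80211 rate table maps onto the series above
 * (illustrative values): rates[0] = { .idx = 7, .count = 2, MCS } becomes
 * series[0].Rate = 0x87 with Tries = 2; rates[1] = { .idx = 3, .count = 4,
 * MCS | 40 MHz } becomes series[1].Rate = 0x83 with Tries = 4 and
 * ATH9K_RATESERIES_2040 set. Unused entries stay zeroed from the memset.
 */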
1598
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001599static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
Sujithe8324352009-01-16 21:38:42 +05301600 struct sk_buff *skb,
1601 struct ath_tx_control *txctl)
1602{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001603 struct ath_wiphy *aphy = hw->priv;
1604 struct ath_softc *sc = aphy->sc;
Sujithe8324352009-01-16 21:38:42 +05301605 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1606 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301607 int hdrlen;
1608 __le16 fc;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001609 int padpos, padsize;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001610 bool use_ldpc = false;
Sujithe8324352009-01-16 21:38:42 +05301611
Felix Fietkau827e69b2009-11-15 23:09:25 +01001612 tx_info->pad[0] = 0;
1613 switch (txctl->frame_type) {
Pavel Roskinc81494d2010-03-31 18:05:25 -04001614 case ATH9K_IFT_NOT_INTERNAL:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001615 break;
Pavel Roskinc81494d2010-03-31 18:05:25 -04001616 case ATH9K_IFT_PAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001617 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1618 /* fall through */
Pavel Roskinc81494d2010-03-31 18:05:25 -04001619 case ATH9K_IFT_UNPAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001620 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1621 break;
1622 }
Sujithe8324352009-01-16 21:38:42 +05301623 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1624 fc = hdr->frame_control;
1625
1626 ATH_TXBUF_RESET(bf);
1627
Felix Fietkau827e69b2009-11-15 23:09:25 +01001628 bf->aphy = aphy;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001629 bf->bf_frmlen = skb->len + FCS_LEN;
1630 /* Remove the padding size from bf_frmlen, if any */
1631 padpos = ath9k_cmn_padpos(hdr->frame_control);
1632 padsize = padpos & 3;
	1633	if (padsize && skb->len > padpos + padsize)
	1634		bf->bf_frmlen -= padsize;
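	/*
	 * For example (assuming ath9k_cmn_padpos() returns the 802.11 header
	 * length, 26 bytes for a QoS data frame), padsize = 26 & 3 = 2: the
	 * two alignment bytes already present in the skb are not transmitted
	 * on air, so they are subtracted from bf_frmlen here.
	 */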
Sujithe8324352009-01-16 21:38:42 +05301636
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001637 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
Sujithc656bbb2009-01-16 21:38:56 +05301638 bf->bf_state.bf_type |= BUF_HT;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001639 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1640 use_ldpc = true;
1641 }
Sujithe8324352009-01-16 21:38:42 +05301642
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001643 bf->bf_state.bfs_paprd = txctl->paprd;
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001644 if (txctl->paprd)
1645 bf->bf_state.bfs_paprd_timestamp = jiffies;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001646 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
Sujithe8324352009-01-16 21:38:42 +05301647
Luis R. Rodriguezc17512d2010-08-05 17:56:54 -04001648 bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujithe8324352009-01-16 21:38:42 +05301649 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1650 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1651 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1652 } else {
1653 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1654 }
1655
Sujith17b182e2009-12-14 14:56:56 +05301656 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1657 (sc->sc_flags & SC_OP_TXAGGR))
Sujithe8324352009-01-16 21:38:42 +05301658 assign_aggr_tid_seqno(skb, bf);
1659
1660 bf->bf_mpdu = skb;
1661
1662 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
1663 skb->len, DMA_TO_DEVICE);
1664 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
1665 bf->bf_mpdu = NULL;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001666 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1667 "dma_mapping_error() on TX\n");
Sujithe8324352009-01-16 21:38:42 +05301668 return -ENOMEM;
1669 }
1670
1671 bf->bf_buf_addr = bf->bf_dmacontext;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001672
1673 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1674 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1675 bf->bf_isnullfunc = true;
Sujith1b04b932010-01-08 10:36:05 +05301676 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001677 } else
1678 bf->bf_isnullfunc = false;
1679
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001680 bf->bf_tx_aborted = false;
1681
Sujithe8324352009-01-16 21:38:42 +05301682 return 0;
1683}
1684
1685/* FIXME: tx power */
1686static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1687 struct ath_tx_control *txctl)
1688{
Sujitha22be222009-03-30 15:28:36 +05301689 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301690 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithc37452b2009-03-09 09:31:57 +05301691 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301692 struct ath_node *an = NULL;
1693 struct list_head bf_head;
1694 struct ath_desc *ds;
1695 struct ath_atx_tid *tid;
Sujithcbe61d82009-02-09 13:27:12 +05301696 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301697 int frm_type;
Sujithc37452b2009-03-09 09:31:57 +05301698 __le16 fc;
Sujithe8324352009-01-16 21:38:42 +05301699
1700 frm_type = get_hw_packet_type(skb);
Sujithc37452b2009-03-09 09:31:57 +05301701 fc = hdr->frame_control;
Sujithe8324352009-01-16 21:38:42 +05301702
1703 INIT_LIST_HEAD(&bf_head);
1704 list_add_tail(&bf->list, &bf_head);
1705
1706 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001707 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301708
1709 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1710 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1711
1712 ath9k_hw_filltxdesc(ah, ds,
1713 skb->len, /* segment length */
1714 true, /* first segment */
1715 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001716 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001717 bf->bf_buf_addr,
1718 txctl->txq->axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301719
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001720 if (bf->bf_state.bfs_paprd)
1721 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1722
Sujithe8324352009-01-16 21:38:42 +05301723 spin_lock_bh(&txctl->txq->axq_lock);
1724
1725 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1726 tx_info->control.sta) {
1727 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1728 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1729
Sujithc37452b2009-03-09 09:31:57 +05301730 if (!ieee80211_is_data_qos(fc)) {
1731 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1732 goto tx_done;
1733 }
1734
Felix Fietkau4fdec032010-03-12 04:02:43 +01001735 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Sujithe8324352009-01-16 21:38:42 +05301736 /*
1737 * Try aggregation if it's a unicast data frame
1738 * and the destination is HT capable.
1739 */
1740 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1741 } else {
1742 /*
	1743			 * Send this frame as a regular frame when the
	1744			 * ADDBA exchange is neither complete nor pending.
1745 */
Sujithc37452b2009-03-09 09:31:57 +05301746 ath_tx_send_ht_normal(sc, txctl->txq,
1747 tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301748 }
1749 } else {
Sujithc37452b2009-03-09 09:31:57 +05301750 ath_tx_send_normal(sc, txctl->txq, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301751 }
1752
Sujithc37452b2009-03-09 09:31:57 +05301753tx_done:
Sujithe8324352009-01-16 21:38:42 +05301754 spin_unlock_bh(&txctl->txq->axq_lock);
1755}
1756
1757/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001758int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301759 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001760{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001761 struct ath_wiphy *aphy = hw->priv;
1762 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001763 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau84642d62010-06-01 21:33:13 +02001764 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001765 struct ath_buf *bf;
Felix Fietkau97923b12010-06-12 00:33:55 -04001766 int q, r;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001767
Sujithe8324352009-01-16 21:38:42 +05301768 bf = ath_tx_get_buffer(sc);
1769 if (!bf) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001770 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
Sujithe8324352009-01-16 21:38:42 +05301771 return -1;
1772 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001773
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001774 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301775 if (unlikely(r)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001776 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001777
Sujithe8324352009-01-16 21:38:42 +05301778		/* this TX queue will be resumed in ath_tx_processq(); we can
	1779		 * guarantee that because we know beforehand that TX
	1780		 * completion will have to run on at least one buffer
	1781		 * on the queue */
1782 spin_lock_bh(&txq->axq_lock);
Felix Fietkau84642d62010-06-01 21:33:13 +02001783 if (!txq->stopped && txq->axq_depth > 1) {
Luis R. Rodriguezf52de032009-11-02 17:09:12 -08001784 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
Sujithe8324352009-01-16 21:38:42 +05301785 txq->stopped = 1;
1786 }
1787 spin_unlock_bh(&txq->axq_lock);
1788
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001789 ath_tx_return_buffer(sc, bf);
Sujithe8324352009-01-16 21:38:42 +05301790
1791 return r;
1792 }
1793
Felix Fietkau97923b12010-06-12 00:33:55 -04001794 q = skb_get_queue_mapping(skb);
1795 if (q >= 4)
1796 q = 0;
1797
1798 spin_lock_bh(&txq->axq_lock);
1799 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1800 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1801 txq->stopped = 1;
1802 }
1803 spin_unlock_bh(&txq->axq_lock);
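	/*
	 * The queue-stop logic above is per-queue flow control:
	 * pending_frames[q] counts frames handed to the driver for mac80211
	 * queue q (ACs 0-3); once it crosses ATH_MAX_QDEPTH the mac80211
	 * queue is stopped here and started again from
	 * ath_wake_mac80211_queue() when completions drain it below the
	 * threshold.
	 */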
1804
Sujithe8324352009-01-16 21:38:42 +05301805 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001806
1807 return 0;
1808}
1809
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001810void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001811{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001812 struct ath_wiphy *aphy = hw->priv;
1813 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001814 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001815 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1816 int padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301817 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1818 struct ath_tx_control txctl;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001819
Sujithe8324352009-01-16 21:38:42 +05301820 memset(&txctl, 0, sizeof(struct ath_tx_control));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001821
Sujithe8324352009-01-16 21:38:42 +05301822 /*
1823 * As a temporary workaround, assign seq# here; this will likely need
1824 * to be cleaned up to work better with Beacon transmission and virtual
1825 * BSSes.
1826 */
1827 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
Sujithe8324352009-01-16 21:38:42 +05301828 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1829 sc->tx.seq_no += 0x10;
1830 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1831 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001832 }
1833
Sujithe8324352009-01-16 21:38:42 +05301834 /* Add the padding after the header if this is not already done */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001835 padpos = ath9k_cmn_padpos(hdr->frame_control);
1836 padsize = padpos & 3;
	1837	if (padsize && skb->len > padpos) {
Sujithe8324352009-01-16 21:38:42 +05301838 if (skb_headroom(skb) < padsize) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001839 ath_print(common, ATH_DBG_XMIT,
1840 "TX CABQ padding failed\n");
Sujithe8324352009-01-16 21:38:42 +05301841 dev_kfree_skb_any(skb);
1842 return;
1843 }
1844 skb_push(skb, padsize);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001845 memmove(skb->data, skb->data + padsize, padpos);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001846 }
1847
Sujithe8324352009-01-16 21:38:42 +05301848 txctl.txq = sc->beacon.cabq;
1849
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001850 ath_print(common, ATH_DBG_XMIT,
1851 "transmitting CABQ packet, skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301852
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001853 if (ath_tx_start(hw, skb, &txctl) != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001854 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
Sujithe8324352009-01-16 21:38:42 +05301855 goto exit;
1856 }
1857
1858 return;
1859exit:
1860 dev_kfree_skb_any(skb);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001861}
1862
Sujithe8324352009-01-16 21:38:42 +05301863/*****************/
1864/* TX Completion */
1865/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001866
Sujithe8324352009-01-16 21:38:42 +05301867static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau827e69b2009-11-15 23:09:25 +01001868 struct ath_wiphy *aphy, int tx_flags)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001869{
Sujithe8324352009-01-16 21:38:42 +05301870 struct ieee80211_hw *hw = sc->hw;
1871 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001872 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001873	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001874 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301875
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001876 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301877
Felix Fietkau827e69b2009-11-15 23:09:25 +01001878 if (aphy)
1879 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301880
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301881 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301882 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301883
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301884 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301885 /* Frame was ACKed */
1886 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1887 }
1888
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001889 padpos = ath9k_cmn_padpos(hdr->frame_control);
1890 padsize = padpos & 3;
	1891	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301892 /*
1893 * Remove MAC header padding before giving the frame back to
1894 * mac80211.
1895 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001896 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301897 skb_pull(skb, padsize);
1898 }
1899
Sujith1b04b932010-01-08 10:36:05 +05301900 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1901 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001902 ath_print(common, ATH_DBG_PS,
1903 "Going back to sleep after having "
Pavel Roskinf643e512010-01-29 17:22:12 -05001904 "received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301905 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1906 PS_WAIT_FOR_CAB |
1907 PS_WAIT_FOR_PSPOLL_DATA |
1908 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001909 }
1910
Felix Fietkau827e69b2009-11-15 23:09:25 +01001911 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
Jouni Malinenf0ed85c2009-03-03 19:23:31 +02001912 ath9k_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001913 else {
1914 q = skb_get_queue_mapping(skb);
1915 if (q >= 4)
1916 q = 0;
1917
1918 if (--sc->tx.pending_frames[q] < 0)
1919 sc->tx.pending_frames[q] = 0;
1920
Felix Fietkau827e69b2009-11-15 23:09:25 +01001921 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001922 }
Sujithe8324352009-01-16 21:38:42 +05301923}
1924
1925static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001926 struct ath_txq *txq, struct list_head *bf_q,
1927 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301928{
1929 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301930 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301931 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301932
Sujithe8324352009-01-16 21:38:42 +05301933 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301934 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301935
1936 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301937 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301938
1939 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301940 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301941 }
1942
1943 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001944
1945 if (bf->bf_state.bfs_paprd) {
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001946 if (time_after(jiffies,
1947 bf->bf_state.bfs_paprd_timestamp +
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001948 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001949 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001950 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001951 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001952 } else {
1953 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1954 ath_debug_stat_tx(sc, txq, bf, ts);
1955 }
Sujithe8324352009-01-16 21:38:42 +05301956
1957 /*
1958 * Return the list of ath_buf of this mpdu to free queue
1959 */
1960 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1961 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1962 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1963}
1964
1965static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001966 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301967{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001968 u16 seq_st = 0;
1969 u32 ba[WME_BA_BMP_SIZE >> 5];
Sujithe8324352009-01-16 21:38:42 +05301970 int ba_index;
1971 int nbad = 0;
1972 int isaggr = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001973
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001974 if (bf->bf_lastbf->bf_tx_aborted)
Sujithe8324352009-01-16 21:38:42 +05301975 return 0;
Sujith528f0c62008-10-29 10:14:26 +05301976
Sujithcd3d39a2008-08-11 14:03:34 +05301977 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001978 if (isaggr) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001979 seq_st = ts->ts_seqnum;
1980 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001981 }
1982
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001983 while (bf) {
Sujithe8324352009-01-16 21:38:42 +05301984 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1985 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1986 nbad++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001987
Sujithe8324352009-01-16 21:38:42 +05301988 bf = bf->bf_next;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001989 }
1990
Sujithe8324352009-01-16 21:38:42 +05301991 return nbad;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001992}
1993
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001994static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301995 int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301996{
Sujitha22be222009-03-30 15:28:36 +05301997 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301998 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301999 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01002000 struct ieee80211_hw *hw = bf->aphy->hw;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302001 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302002
Sujith95e4acb2009-03-13 08:56:09 +05302003 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002004 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302005
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002006 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302007 WARN_ON(tx_rateindex >= hw->max_rates);
2008
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002009 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302010 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Felix Fietkaud9698472010-03-01 13:32:11 +01002011 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
2012 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302013
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002014 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302015 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Sujith254ad0f2009-02-04 08:10:19 +05302016 if (ieee80211_is_data(hdr->frame_control)) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002017 if (ts->ts_flags &
Felix Fietkau827e69b2009-11-15 23:09:25 +01002018 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2019 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002020 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2021 (ts->ts_status & ATH9K_TXERR_FIFO))
Felix Fietkau827e69b2009-11-15 23:09:25 +01002022 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2023 tx_info->status.ampdu_len = bf->bf_nframes;
2024 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
Sujithc4288392008-11-18 09:09:30 +05302025 }
2026 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302027
Felix Fietkau545750d2009-11-23 22:21:01 +01002028 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302029 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002030 tx_info->status.rates[i].idx = -1;
2031 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302032
Felix Fietkau78c46532010-06-25 01:26:16 +02002033 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302034}
2035
Sujith059d8062009-01-16 21:38:49 +05302036static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2037{
2038 int qnum;
2039
Felix Fietkau97923b12010-06-12 00:33:55 -04002040 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2041 if (qnum == -1)
2042 return;
2043
Sujith059d8062009-01-16 21:38:49 +05302044 spin_lock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002045 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07002046 if (ath_mac80211_start_queue(sc, qnum))
2047 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05302048 }
2049 spin_unlock_bh(&txq->axq_lock);
2050}
2051
Sujithc4288392008-11-18 09:09:30 +05302052static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002053{
Sujithcbe61d82009-02-09 13:27:12 +05302054 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002055 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002056 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2057 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302058 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002059 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302060 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002061 int status;
2062
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002063 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2064 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2065 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002066
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002067 for (;;) {
2068 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002069 if (list_empty(&txq->axq_q)) {
2070 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002071 spin_unlock_bh(&txq->axq_lock);
2072 break;
2073 }
2074 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2075
2076 /*
	2077		 * There is a race condition where a BH gets scheduled
	2078		 * after sw writes TxE and before hw re-loads the last
	2079		 * descriptor to pick up the newly chained one.
2080 * Software must keep the last DONE descriptor as a
2081 * holding descriptor - software does so by marking
2082 * it with the STALE flag.
2083 */
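		/*
		 * For instance (hypothetical queue state): with
		 * axq_q = [D1(stale)] -> [D2] -> [D3], D1 is the holding
		 * descriptor left over from the previous batch; it is only
		 * unlinked and returned to the free list further down, once
		 * a newly completed descriptor can take over that role.
		 */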
2084 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302085 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002086 bf_held = bf;
2087 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302088 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002089 break;
2090 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002091 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302092 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002093 }
2094 }
2095
2096 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302097 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002098
Felix Fietkau29bffa92010-03-29 20:14:23 -07002099 memset(&ts, 0, sizeof(ts));
2100 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002101 if (status == -EINPROGRESS) {
2102 spin_unlock_bh(&txq->axq_lock);
2103 break;
2104 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002105
2106 /*
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002107 * We now know the nullfunc frame has been ACKed so we
2108 * can disable RX.
2109 */
2110 if (bf->bf_isnullfunc &&
Felix Fietkau29bffa92010-03-29 20:14:23 -07002111 (ts.ts_status & ATH9K_TX_ACKED)) {
Senthil Balasubramanian3f7c5c12010-02-03 22:51:13 +05302112 if ((sc->ps_flags & PS_ENABLED))
2113 ath9k_enable_ps(sc);
2114 else
Sujith1b04b932010-01-08 10:36:05 +05302115 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002116 }
2117
2118 /*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002119 * Remove ath_buf's of the same transmit unit from txq,
2120 * however leave the last descriptor back as the holding
2121 * descriptor for hw.
2122 */
Sujitha119cc42009-03-30 15:28:38 +05302123 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002124 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002125 if (!list_is_singular(&lastbf->list))
2126 list_cut_position(&bf_head,
2127 &txq->axq_q, lastbf->list.prev);
2128
2129 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002130 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002131 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002132 if (bf_held)
2133 list_del(&bf_held->list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002134 spin_unlock_bh(&txq->axq_lock);
2135
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002136 if (bf_held)
2137 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002138
Sujithcd3d39a2008-08-11 14:03:34 +05302139 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002140 /*
2141 * This frame is sent out as a single frame.
2142 * Use hardware retry status for this frame.
2143 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002144 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302145 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002146 ath_tx_rc_status(bf, &ts, 0, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002147 }
Johannes Berge6a98542008-10-21 12:40:02 +02002148
Sujithcd3d39a2008-08-11 14:03:34 +05302149 if (bf_isampdu(bf))
Felix Fietkau29bffa92010-03-29 20:14:23 -07002150 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002151 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002152 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002153
Sujith059d8062009-01-16 21:38:49 +05302154 ath_wake_mac80211_queue(sc, txq);
2155
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002156 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302157 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002158 ath_txq_schedule(sc, txq);
2159 spin_unlock_bh(&txq->axq_lock);
2160 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002161}
2162
Sujith305fe472009-07-23 15:32:29 +05302163static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002164{
2165 struct ath_softc *sc = container_of(work, struct ath_softc,
2166 tx_complete_work.work);
2167 struct ath_txq *txq;
2168 int i;
2169 bool needreset = false;
2170
2171 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2172 if (ATH_TXQ_SETUP(sc, i)) {
2173 txq = &sc->tx.txq[i];
2174 spin_lock_bh(&txq->axq_lock);
2175 if (txq->axq_depth) {
2176 if (txq->axq_tx_inprogress) {
2177 needreset = true;
2178 spin_unlock_bh(&txq->axq_lock);
2179 break;
2180 } else {
2181 txq->axq_tx_inprogress = true;
2182 }
2183 }
2184 spin_unlock_bh(&txq->axq_lock);
2185 }
2186
2187 if (needreset) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002188 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2189 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302190 ath9k_ps_wakeup(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002191 ath_reset(sc, false);
Sujith332c5562009-10-09 09:51:28 +05302192 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002193 }
2194
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002195 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002196 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2197}
2198
2199
Sujithe8324352009-01-16 21:38:42 +05302200
2201void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002202{
Sujithe8324352009-01-16 21:38:42 +05302203 int i;
2204 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002205
Sujithe8324352009-01-16 21:38:42 +05302206 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002207
2208 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302209 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2210 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002211 }
2212}
2213
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002214void ath_tx_edma_tasklet(struct ath_softc *sc)
2215{
2216 struct ath_tx_status txs;
2217 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2218 struct ath_hw *ah = sc->sc_ah;
2219 struct ath_txq *txq;
2220 struct ath_buf *bf, *lastbf;
2221 struct list_head bf_head;
2222 int status;
2223 int txok;
2224
2225 for (;;) {
2226 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2227 if (status == -EINPROGRESS)
2228 break;
2229 if (status == -EIO) {
2230 ath_print(common, ATH_DBG_XMIT,
2231 "Error processing tx status\n");
2232 break;
2233 }
2234
2235 /* Skip beacon completions */
2236 if (txs.qid == sc->beacon.beaconq)
2237 continue;
2238
2239 txq = &sc->tx.txq[txs.qid];
2240
2241 spin_lock_bh(&txq->axq_lock);
2242 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2243 spin_unlock_bh(&txq->axq_lock);
2244 return;
2245 }
2246
2247 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2248 struct ath_buf, list);
2249 lastbf = bf->bf_lastbf;
2250
2251 INIT_LIST_HEAD(&bf_head);
2252 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2253 &lastbf->list);
2254 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2255 txq->axq_depth--;
2256 txq->axq_tx_inprogress = false;
2257 spin_unlock_bh(&txq->axq_lock);
2258
2259 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2260
Vasanthakumar Thiagarajande0f6482010-05-17 18:57:54 -07002261 /*
	2262		 * Make sure the nullfunc frame is acked before configuring
2263 * hw into ps mode.
2264 */
2265 if (bf->bf_isnullfunc && txok) {
2266 if ((sc->ps_flags & PS_ENABLED))
2267 ath9k_enable_ps(sc);
2268 else
2269 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2270 }
2271
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002272 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002273 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2274 bf->bf_state.bf_type |= BUF_XRETRY;
2275 ath_tx_rc_status(bf, &txs, 0, txok, true);
2276 }
2277
2278 if (bf_isampdu(bf))
2279 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2280 else
2281 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2282 &txs, txok, 0);
2283
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002284 ath_wake_mac80211_queue(sc, txq);
2285
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002286 spin_lock_bh(&txq->axq_lock);
2287 if (!list_empty(&txq->txq_fifo_pending)) {
2288 INIT_LIST_HEAD(&bf_head);
2289 bf = list_first_entry(&txq->txq_fifo_pending,
2290 struct ath_buf, list);
2291 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2292 &bf->bf_lastbf->list);
2293 ath_tx_txqaddbuf(sc, txq, &bf_head);
2294 } else if (sc->sc_flags & SC_OP_TXAGGR)
2295 ath_txq_schedule(sc, txq);
2296 spin_unlock_bh(&txq->axq_lock);
2297 }
2298}
2299
Sujithe8324352009-01-16 21:38:42 +05302300/*****************/
2301/* Init, Cleanup */
2302/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002303
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002304static int ath_txstatus_setup(struct ath_softc *sc, int size)
2305{
2306 struct ath_descdma *dd = &sc->txsdma;
2307 u8 txs_len = sc->sc_ah->caps.txs_len;
2308
2309 dd->dd_desc_len = size * txs_len;
2310 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2311 &dd->dd_desc_paddr, GFP_KERNEL);
2312 if (!dd->dd_desc)
2313 return -ENOMEM;
2314
2315 return 0;
2316}
2317
2318static int ath_tx_edma_init(struct ath_softc *sc)
2319{
2320 int err;
2321
2322 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2323 if (!err)
2324 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2325 sc->txsdma.dd_desc_paddr,
2326 ATH_TXSTATUS_RING_SIZE);
2327
2328 return err;
2329}
2330
2331static void ath_tx_edma_cleanup(struct ath_softc *sc)
2332{
2333 struct ath_descdma *dd = &sc->txsdma;
2334
2335 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2336 dd->dd_desc_paddr);
2337}
2338
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002339int ath_tx_init(struct ath_softc *sc, int nbufs)
2340{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002341 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002342 int error = 0;
2343
Sujith797fe5cb2009-03-30 15:28:45 +05302344 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002345
Sujith797fe5cb2009-03-30 15:28:45 +05302346 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002347 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302348 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002349 ath_print(common, ATH_DBG_FATAL,
2350 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302351 goto err;
2352 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002353
Sujith797fe5cb2009-03-30 15:28:45 +05302354 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002355 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302356 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002357 ath_print(common, ATH_DBG_FATAL,
2358 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302359 goto err;
2360 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002361
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002362 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2363
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002364 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2365 error = ath_tx_edma_init(sc);
2366 if (error)
2367 goto err;
2368 }
2369
Sujith797fe5cb2009-03-30 15:28:45 +05302370err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002371 if (error != 0)
2372 ath_tx_cleanup(sc);
2373
2374 return error;
2375}
2376
Sujith797fe5cb2009-03-30 15:28:45 +05302377void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002378{
Sujithb77f4832008-12-07 21:44:03 +05302379 if (sc->beacon.bdma.dd_desc_len != 0)
2380 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002381
Sujithb77f4832008-12-07 21:44:03 +05302382 if (sc->tx.txdma.dd_desc_len != 0)
2383 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002384
2385 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2386 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002387}
2388
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002389void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2390{
Sujithc5170162008-10-29 10:13:59 +05302391 struct ath_atx_tid *tid;
2392 struct ath_atx_ac *ac;
2393 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002394
Sujith8ee5afb2008-12-07 21:43:36 +05302395 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302396 tidno < WME_NUM_TID;
2397 tidno++, tid++) {
2398 tid->an = an;
2399 tid->tidno = tidno;
2400 tid->seq_start = tid->seq_next = 0;
2401 tid->baw_size = WME_MAX_BA;
2402 tid->baw_head = tid->baw_tail = 0;
2403 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302404 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302405 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302406 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302407 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302408 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302409 tid->state &= ~AGGR_ADDBA_COMPLETE;
2410 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302411 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002412
Sujith8ee5afb2008-12-07 21:43:36 +05302413 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302414 acno < WME_NUM_AC; acno++, ac++) {
2415 ac->sched = false;
Felix Fietkau1d2231e2010-06-12 00:33:51 -04002416 ac->qnum = sc->tx.hwq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302417 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002418 }
2419}
2420
Sujithb5aa9bf2008-10-29 10:13:31 +05302421void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002422{
Felix Fietkau2b409942010-07-07 19:42:08 +02002423 struct ath_atx_ac *ac;
2424 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002425 struct ath_txq *txq;
Felix Fietkau2b409942010-07-07 19:42:08 +02002426 int i, tidno;
Sujithe8324352009-01-16 21:38:42 +05302427
Felix Fietkau2b409942010-07-07 19:42:08 +02002428 for (tidno = 0, tid = &an->tid[tidno];
2429 tidno < WME_NUM_TID; tidno++, tid++) {
2430 i = tid->ac->qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002431
Felix Fietkau2b409942010-07-07 19:42:08 +02002432 if (!ATH_TXQ_SETUP(sc, i))
2433 continue;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002434
Felix Fietkau2b409942010-07-07 19:42:08 +02002435 txq = &sc->tx.txq[i];
2436 ac = tid->ac;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002437
Felix Fietkau2b409942010-07-07 19:42:08 +02002438 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002439
Felix Fietkau2b409942010-07-07 19:42:08 +02002440 if (tid->sched) {
2441 list_del(&tid->list);
2442 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002443 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002444
2445 if (ac->sched) {
2446 list_del(&ac->list);
2447 tid->ac->sched = false;
2448 }
2449
2450 ath_tid_drain(sc, txq, tid);
2451 tid->state &= ~AGGR_ADDBA_COMPLETE;
2452 tid->state &= ~AGGR_CLEANUP;
2453
2454 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002455 }
2456}