/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /* 0: BPSK */
	{    52,  108 },     /* 1: QPSK 1/2 */
	{    78,  162 },     /* 2: QPSK 3/4 */
	{   104,  216 },     /* 3: 16-QAM 1/2 */
	{   156,  324 },     /* 4: 16-QAM 3/4 */
	{   208,  432 },     /* 5: 64-QAM 2/3 */
	{   234,  486 },     /* 6: 64-QAM 3/4 */
	{   260,  540 },     /* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

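/*
 * Queue a TID on its access category's round-robin list and, if the AC is
 * not yet scheduled, queue the AC on the hardware queue's scheduling list.
 * Paused or already-scheduled TIDs are left untouched.
 */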
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf)) {
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
		}
	}

	spin_unlock_bh(&txq->axq_lock);
}

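/*
 * Mark a sequence number as completed in the block-ack window and slide
 * the window start past any leading subframes that have since completed.
 */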
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

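/*
 * Record a newly transmitted (non-retried) subframe in the block-ack
 * window bitmap, extending the window tail if necessary.
 */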
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

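/*
 * Take a fresh ath_buf from the free list and copy the descriptor and
 * state of an existing buffer into it, for use in software retries.
 */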
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

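/*
 * Process the tx status of an aggregate: walk the subframe chain,
 * complete the subframes covered by the block-ack, software-retry the
 * rest, and update the block-ack window accordingly.
 */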
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	int nframes;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));
	nframes = bf->bf_nframes;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				bf->bf_nframes = nframes;
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer on the temporary pending
			 * queue to retain ordering.
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

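/*
 * Derive the aggregate size limit for this TID from the configured rate
 * series, using the 4 ms frame length table and the peer's maximum
 * A-MPDU length.
 */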
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates.
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

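/*
 * Assemble an A-MPDU from the TID's software queue, honouring the
 * block-ack window, the aggregate length limit and the subframe limit,
 * and chain the descriptors of the chosen subframes together.
 */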
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

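/*
 * Form aggregates from the TID's software queue and hand them to the
 * hardware queue until it is sufficiently deep or the block-ack window
 * closes.
 */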
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

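/*
 * Pick the next access category and TID in round-robin order and let it
 * send aggregates; requeue the AC if it still has pending TIDs.
 */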
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

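/*
 * Queue an A-MPDU subframe: defer it to the software queue if the TID is
 * paused, frames are already queued, the sequence number falls outside
 * the block-ack window or the hardware queue is deep enough; otherwise
 * add it to the BAW and send it directly.
 */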
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

Sujith528f0c62008-10-29 10:14:26 +05301373static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001374{
Sujith528f0c62008-10-29 10:14:26 +05301375 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001376 enum ath9k_pkt_type htype;
1377 __le16 fc;
1378
Sujith528f0c62008-10-29 10:14:26 +05301379 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001380 fc = hdr->frame_control;
1381
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001382 if (ieee80211_is_beacon(fc))
1383 htype = ATH9K_PKT_TYPE_BEACON;
1384 else if (ieee80211_is_probe_resp(fc))
1385 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1386 else if (ieee80211_is_atim(fc))
1387 htype = ATH9K_PKT_TYPE_ATIM;
1388 else if (ieee80211_is_pspoll(fc))
1389 htype = ATH9K_PKT_TYPE_PSPOLL;
1390 else
1391 htype = ATH9K_PKT_TYPE_NORMAL;
1392
1393 return htype;
1394}
1395
Sujith528f0c62008-10-29 10:14:26 +05301396static void assign_aggr_tid_seqno(struct sk_buff *skb,
1397 struct ath_buf *bf)
1398{
1399 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1400 struct ieee80211_hdr *hdr;
1401 struct ath_node *an;
1402 struct ath_atx_tid *tid;
1403 __le16 fc;
1404 u8 *qc;
1405
1406 if (!tx_info->control.sta)
1407 return;
1408
1409 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1410 hdr = (struct ieee80211_hdr *)skb->data;
1411 fc = hdr->frame_control;
1412
Sujith528f0c62008-10-29 10:14:26 +05301413 if (ieee80211_is_data_qos(fc)) {
1414 qc = ieee80211_get_qos_ctl(hdr);
1415 bf->bf_tidno = qc[0] & 0xf;
Sujith98deeea2008-08-11 14:05:46 +05301416 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001417
Sujithe8324352009-01-16 21:38:42 +05301418 /*
 1419	 * For HT capable stations, we save the tidno for later use.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301420	 * We also override the seqno set by the upper layer with the one
 1421	 * tracked in the tx aggregation state.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301422 */
1423 tid = ATH_AN_2_TID(an, bf->bf_tidno);
Sujith17b182e2009-12-14 14:56:56 +05301424 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301425 bf->bf_seqno = tid->seq_next;
1426 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
Sujith528f0c62008-10-29 10:14:26 +05301427}
1428
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001429static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
Sujith528f0c62008-10-29 10:14:26 +05301430{
1431 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1432 int flags = 0;
1433
1434 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1435 flags |= ATH9K_TXDESC_INTREQ;
1436
1437 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1438 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301439
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001440 if (use_ldpc)
1441 flags |= ATH9K_TXDESC_LDPC;
1442
Sujith528f0c62008-10-29 10:14:26 +05301443 return flags;
1444}
1445
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001446/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001447 * rix - rate index
1448 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1449 * width - 0 for 20 MHz, 1 for 40 MHz
 1450 * half_gi - if set, use the 3.6 us (half GI) symbol time instead of 4 us
1451 */
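/*
 * Rough worked example: for MCS 7 at 20 MHz with the long GI, a 1500-byte
 * MPDU gives nbits = 1500 * 8 + OFDM_PLCP_BITS = 12022; at 260 data bits
 * per symbol that is 47 symbols, i.e. 188 us of data, plus 36 us of
 * legacy/HT training and signal fields for a single stream, for a
 * PktDuration of roughly 224 us.
 */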
Sujith102e0572008-10-29 10:15:16 +05301452static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1453 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001454{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001455 u32 nbits, nsymbits, duration, nsymbols;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001456 int streams, pktlen;
1457
Sujithcd3d39a2008-08-11 14:03:34 +05301458 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +05301459
1460 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001461 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001462 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001463 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001464 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1465
1466 if (!half_gi)
1467 duration = SYMBOL_TIME(nsymbols);
1468 else
1469 duration = SYMBOL_TIME_HALFGI(nsymbols);
1470
Sujithe63835b2008-11-18 09:07:53 +05301471	/* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001472 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301473
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001474 return duration;
1475}
1476
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001477static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1478{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001479 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001480 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301481 struct sk_buff *skb;
1482 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301483 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001484 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301485 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301486 int i, flags = 0;
1487 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301488 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301489
1490 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301491
Sujitha22be222009-03-30 15:28:36 +05301492 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301493 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301494 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301495 hdr = (struct ieee80211_hdr *)skb->data;
1496 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301497
Sujithc89424d2009-01-30 14:29:28 +05301498 /*
 1499	 * Whether Short Preamble is needed for the CTS rate is
 1500	 * determined by the BSS's global flag.
 1501	 * For the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used instead.
1502 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001503 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1504 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301505 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001506 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001507
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001508 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001509 bool is_40, is_sgi, is_sp;
1510 int phy;
1511
Sujithe63835b2008-11-18 09:07:53 +05301512 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001513 continue;
1514
Sujitha8efee42008-11-18 09:07:30 +05301515 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301516 series[i].Tries = rates[i].count;
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001517 series[i].ChSel = common->tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001518
Felix Fietkau27032052010-01-17 21:08:50 +01001519 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1520 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301521 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001522 flags |= ATH9K_TXDESC_RTSENA;
1523 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1524 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1525 flags |= ATH9K_TXDESC_CTSENA;
1526 }
1527
Sujithc89424d2009-01-30 14:29:28 +05301528 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1529 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1530 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1531 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001532
Felix Fietkau545750d2009-11-23 22:21:01 +01001533 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1534 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1535 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1536
1537 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1538 /* MCS rates */
1539 series[i].Rate = rix | 0x80;
1540 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1541 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001542 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1543 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001544 continue;
1545 }
1546
 1547		/* legacy rates */
1548 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1549 !(rate->flags & IEEE80211_RATE_ERP_G))
1550 phy = WLAN_RC_PHY_CCK;
1551 else
1552 phy = WLAN_RC_PHY_OFDM;
1553
1554 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1555 series[i].Rate = rate->hw_value;
1556 if (rate->hw_value_short) {
1557 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1558 series[i].Rate |= rate->hw_value_short;
1559 } else {
1560 is_sp = false;
1561 }
1562
1563 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1564 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001565 }
1566
Felix Fietkau27032052010-01-17 21:08:50 +01001567 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1568 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1569 flags &= ~ATH9K_TXDESC_RTSENA;
1570
1571 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1572 if (flags & ATH9K_TXDESC_RTSENA)
1573 flags &= ~ATH9K_TXDESC_CTSENA;
1574
Sujithe63835b2008-11-18 09:07:53 +05301575 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301576 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1577 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301578 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301579 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301580
Sujith17d79042009-02-09 13:27:03 +05301581 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301582 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001583}
1584
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001585static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
Sujithe8324352009-01-16 21:38:42 +05301586 struct sk_buff *skb,
1587 struct ath_tx_control *txctl)
1588{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001589 struct ath_wiphy *aphy = hw->priv;
1590 struct ath_softc *sc = aphy->sc;
Sujithe8324352009-01-16 21:38:42 +05301591 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1592 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301593 int hdrlen;
1594 __le16 fc;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001595 int padpos, padsize;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001596 bool use_ldpc = false;
Sujithe8324352009-01-16 21:38:42 +05301597
Felix Fietkau827e69b2009-11-15 23:09:25 +01001598 tx_info->pad[0] = 0;
1599 switch (txctl->frame_type) {
Pavel Roskinc81494d2010-03-31 18:05:25 -04001600 case ATH9K_IFT_NOT_INTERNAL:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001601 break;
Pavel Roskinc81494d2010-03-31 18:05:25 -04001602 case ATH9K_IFT_PAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001603 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1604 /* fall through */
Pavel Roskinc81494d2010-03-31 18:05:25 -04001605 case ATH9K_IFT_UNPAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001606 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1607 break;
1608 }
Sujithe8324352009-01-16 21:38:42 +05301609 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1610 fc = hdr->frame_control;
1611
1612 ATH_TXBUF_RESET(bf);
1613
Felix Fietkau827e69b2009-11-15 23:09:25 +01001614 bf->aphy = aphy;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001615 bf->bf_frmlen = skb->len + FCS_LEN;
1616 /* Remove the padding size from bf_frmlen, if any */
1617 padpos = ath9k_cmn_padpos(hdr->frame_control);
1618 padsize = padpos & 3;
 1619	if (padsize && skb->len > padpos + padsize) {
1620 bf->bf_frmlen -= padsize;
1621 }
Sujithe8324352009-01-16 21:38:42 +05301622
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001623 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
Sujithc656bbb2009-01-16 21:38:56 +05301624 bf->bf_state.bf_type |= BUF_HT;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001625 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1626 use_ldpc = true;
1627 }
Sujithe8324352009-01-16 21:38:42 +05301628
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001629 bf->bf_state.bfs_paprd = txctl->paprd;
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001630 if (txctl->paprd)
1631 bf->bf_state.bfs_paprd_timestamp = jiffies;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001632 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
Sujithe8324352009-01-16 21:38:42 +05301633
Luis R. Rodriguezc17512d2010-08-05 17:56:54 -04001634 bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujithe8324352009-01-16 21:38:42 +05301635 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1636 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1637 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1638 } else {
1639 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1640 }
1641
Sujith17b182e2009-12-14 14:56:56 +05301642 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1643 (sc->sc_flags & SC_OP_TXAGGR))
Sujithe8324352009-01-16 21:38:42 +05301644 assign_aggr_tid_seqno(skb, bf);
1645
1646 bf->bf_mpdu = skb;
1647
Ben Greearc1739eb2010-10-14 12:45:29 -07001648 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1649 skb->len, DMA_TO_DEVICE);
1650 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301651 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001652 bf->bf_buf_addr = 0;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001653 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1654 "dma_mapping_error() on TX\n");
Sujithe8324352009-01-16 21:38:42 +05301655 return -ENOMEM;
1656 }
1657
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001658 bf->bf_tx_aborted = false;
1659
Sujithe8324352009-01-16 21:38:42 +05301660 return 0;
1661}
1662
1663/* FIXME: tx power */
1664static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1665 struct ath_tx_control *txctl)
1666{
Sujitha22be222009-03-30 15:28:36 +05301667 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301668 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithc37452b2009-03-09 09:31:57 +05301669 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301670 struct ath_node *an = NULL;
1671 struct list_head bf_head;
1672 struct ath_desc *ds;
1673 struct ath_atx_tid *tid;
Sujithcbe61d82009-02-09 13:27:12 +05301674 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301675 int frm_type;
Sujithc37452b2009-03-09 09:31:57 +05301676 __le16 fc;
Sujithe8324352009-01-16 21:38:42 +05301677
1678 frm_type = get_hw_packet_type(skb);
Sujithc37452b2009-03-09 09:31:57 +05301679 fc = hdr->frame_control;
Sujithe8324352009-01-16 21:38:42 +05301680
1681 INIT_LIST_HEAD(&bf_head);
1682 list_add_tail(&bf->list, &bf_head);
1683
1684 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001685 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301686
1687 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1688 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1689
1690 ath9k_hw_filltxdesc(ah, ds,
1691 skb->len, /* segment length */
1692 true, /* first segment */
1693 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001694 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001695 bf->bf_buf_addr,
1696 txctl->txq->axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301697
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001698 if (bf->bf_state.bfs_paprd)
1699 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1700
Sujithe8324352009-01-16 21:38:42 +05301701 spin_lock_bh(&txctl->txq->axq_lock);
1702
1703 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1704 tx_info->control.sta) {
1705 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1706 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1707
Sujithc37452b2009-03-09 09:31:57 +05301708 if (!ieee80211_is_data_qos(fc)) {
1709 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1710 goto tx_done;
1711 }
1712
Felix Fietkau4fdec032010-03-12 04:02:43 +01001713 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Sujithe8324352009-01-16 21:38:42 +05301714 /*
1715 * Try aggregation if it's a unicast data frame
1716 * and the destination is HT capable.
1717 */
1718 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1719 } else {
1720 /*
 1721			 * Send this frame as a regular frame when the ADDBA
 1722			 * exchange is neither complete nor pending.
1723 */
Sujithc37452b2009-03-09 09:31:57 +05301724 ath_tx_send_ht_normal(sc, txctl->txq,
1725 tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301726 }
1727 } else {
Sujithc37452b2009-03-09 09:31:57 +05301728 ath_tx_send_normal(sc, txctl->txq, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301729 }
1730
Sujithc37452b2009-03-09 09:31:57 +05301731tx_done:
Sujithe8324352009-01-16 21:38:42 +05301732 spin_unlock_bh(&txctl->txq->axq_lock);
1733}
1734
1735/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001736int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301737 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001738{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001739 struct ath_wiphy *aphy = hw->priv;
1740 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001741 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau84642d62010-06-01 21:33:13 +02001742 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001743 struct ath_buf *bf;
Felix Fietkau97923b12010-06-12 00:33:55 -04001744 int q, r;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001745
Sujithe8324352009-01-16 21:38:42 +05301746 bf = ath_tx_get_buffer(sc);
1747 if (!bf) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001748 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
Sujithe8324352009-01-16 21:38:42 +05301749 return -1;
1750 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001751
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001752 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301753 if (unlikely(r)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001754 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001755
Sujithe8324352009-01-16 21:38:42 +05301756		/* upon ath_tx_processq() this TX queue will be resumed; we
 1757		 * can guarantee this because we know beforehand that
 1758		 * TX completion will have to run on at least one buffer
 1759		 * on the queue */
1760 spin_lock_bh(&txq->axq_lock);
Felix Fietkau84642d62010-06-01 21:33:13 +02001761 if (!txq->stopped && txq->axq_depth > 1) {
Luis R. Rodriguezf52de032009-11-02 17:09:12 -08001762 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
Sujithe8324352009-01-16 21:38:42 +05301763 txq->stopped = 1;
1764 }
1765 spin_unlock_bh(&txq->axq_lock);
1766
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001767 ath_tx_return_buffer(sc, bf);
Sujithe8324352009-01-16 21:38:42 +05301768
1769 return r;
1770 }
1771
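	/*
	 * Per-queue flow control: once more than ATH_MAX_QDEPTH frames are
	 * pending on this queue, stop the corresponding mac80211 queue.
	 * It is woken up again from ath_wake_mac80211_queue() as
	 * completions drain the pending count.
	 */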
Felix Fietkau97923b12010-06-12 00:33:55 -04001772 q = skb_get_queue_mapping(skb);
1773 if (q >= 4)
1774 q = 0;
1775
1776 spin_lock_bh(&txq->axq_lock);
1777 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1778 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1779 txq->stopped = 1;
1780 }
1781 spin_unlock_bh(&txq->axq_lock);
1782
Sujithe8324352009-01-16 21:38:42 +05301783 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001784
1785 return 0;
1786}
1787
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001788void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001789{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001790 struct ath_wiphy *aphy = hw->priv;
1791 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001792 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001793 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1794 int padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301795 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1796 struct ath_tx_control txctl;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001797
Sujithe8324352009-01-16 21:38:42 +05301798 memset(&txctl, 0, sizeof(struct ath_tx_control));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001799
Sujithe8324352009-01-16 21:38:42 +05301800 /*
1801 * As a temporary workaround, assign seq# here; this will likely need
1802 * to be cleaned up to work better with Beacon transmission and virtual
1803 * BSSes.
1804 */
1805 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
Sujithe8324352009-01-16 21:38:42 +05301806 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1807 sc->tx.seq_no += 0x10;
1808 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1809 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001810 }
1811
Sujithe8324352009-01-16 21:38:42 +05301812 /* Add the padding after the header if this is not already done */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001813 padpos = ath9k_cmn_padpos(hdr->frame_control);
1814 padsize = padpos & 3;
 1815	if (padsize && skb->len > padpos) {
Sujithe8324352009-01-16 21:38:42 +05301816 if (skb_headroom(skb) < padsize) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001817 ath_print(common, ATH_DBG_XMIT,
1818 "TX CABQ padding failed\n");
Sujithe8324352009-01-16 21:38:42 +05301819 dev_kfree_skb_any(skb);
1820 return;
1821 }
1822 skb_push(skb, padsize);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001823 memmove(skb->data, skb->data + padsize, padpos);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001824 }
1825
Sujithe8324352009-01-16 21:38:42 +05301826 txctl.txq = sc->beacon.cabq;
1827
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001828 ath_print(common, ATH_DBG_XMIT,
1829 "transmitting CABQ packet, skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301830
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001831 if (ath_tx_start(hw, skb, &txctl) != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001832 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
Sujithe8324352009-01-16 21:38:42 +05301833 goto exit;
1834 }
1835
1836 return;
1837exit:
1838 dev_kfree_skb_any(skb);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001839}
1840
Sujithe8324352009-01-16 21:38:42 +05301841/*****************/
1842/* TX Completion */
1843/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001844
Sujithe8324352009-01-16 21:38:42 +05301845static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau827e69b2009-11-15 23:09:25 +01001846 struct ath_wiphy *aphy, int tx_flags)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001847{
Sujithe8324352009-01-16 21:38:42 +05301848 struct ieee80211_hw *hw = sc->hw;
1849 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001850 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001851 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001852 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301853
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001854 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301855
Felix Fietkau827e69b2009-11-15 23:09:25 +01001856 if (aphy)
1857 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301858
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301859 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301860 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301861
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301862 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301863 /* Frame was ACKed */
1864 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1865 }
1866
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001867 padpos = ath9k_cmn_padpos(hdr->frame_control);
1868 padsize = padpos & 3;
 1869	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301870 /*
1871 * Remove MAC header padding before giving the frame back to
1872 * mac80211.
1873 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001874 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301875 skb_pull(skb, padsize);
1876 }
1877
Sujith1b04b932010-01-08 10:36:05 +05301878 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1879 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001880 ath_print(common, ATH_DBG_PS,
1881 "Going back to sleep after having "
Pavel Roskinf643e512010-01-29 17:22:12 -05001882 "received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301883 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1884 PS_WAIT_FOR_CAB |
1885 PS_WAIT_FOR_PSPOLL_DATA |
1886 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001887 }
1888
Felix Fietkau827e69b2009-11-15 23:09:25 +01001889 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
Jouni Malinenf0ed85c2009-03-03 19:23:31 +02001890 ath9k_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001891 else {
1892 q = skb_get_queue_mapping(skb);
1893 if (q >= 4)
1894 q = 0;
1895
1896 if (--sc->tx.pending_frames[q] < 0)
1897 sc->tx.pending_frames[q] = 0;
1898
Felix Fietkau827e69b2009-11-15 23:09:25 +01001899 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001900 }
Sujithe8324352009-01-16 21:38:42 +05301901}
1902
1903static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001904 struct ath_txq *txq, struct list_head *bf_q,
1905 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301906{
1907 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301908 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301909 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301910
Sujithe8324352009-01-16 21:38:42 +05301911 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301912 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301913
1914 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301915 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301916
1917 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301918 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301919 }
1920
Ben Greearc1739eb2010-10-14 12:45:29 -07001921 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001922 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001923
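	/*
	 * PAPRD calibration frames are generated internally. On completion
	 * either wake up the waiting calibration code, or drop the skb if
	 * the calibration has already timed out.
	 */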
1924 if (bf->bf_state.bfs_paprd) {
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001925 if (time_after(jiffies,
1926 bf->bf_state.bfs_paprd_timestamp +
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001927 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001928 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001929 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001930 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001931 } else {
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001932 ath_debug_stat_tx(sc, txq, bf, ts);
Ben Greearc23cc812010-10-13 12:01:23 -07001933 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001934 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001935	/* At this point, skb (bf->bf_mpdu) is consumed... make sure we don't
1936 * accidentally reference it later.
1937 */
1938 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301939
1940 /*
 1941	 * Return the ath_buf list of this mpdu to the free queue
1942 */
1943 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1944 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1945 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1946}
1947
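/*
 * Count the frames of this transmit unit that were not acknowledged:
 * walk the bf_next chain and, for aggregates, check each subframe's
 * sequence number against the reported block-ack bitmap.
 */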
1948static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001949 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301950{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001951 u16 seq_st = 0;
1952 u32 ba[WME_BA_BMP_SIZE >> 5];
Sujithe8324352009-01-16 21:38:42 +05301953 int ba_index;
1954 int nbad = 0;
1955 int isaggr = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001956
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001957 if (bf->bf_lastbf->bf_tx_aborted)
Sujithe8324352009-01-16 21:38:42 +05301958 return 0;
Sujith528f0c62008-10-29 10:14:26 +05301959
Sujithcd3d39a2008-08-11 14:03:34 +05301960 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001961 if (isaggr) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001962 seq_st = ts->ts_seqnum;
1963 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001964 }
1965
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001966 while (bf) {
Sujithe8324352009-01-16 21:38:42 +05301967 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1968 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1969 nbad++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001970
Sujithe8324352009-01-16 21:38:42 +05301971 bf = bf->bf_next;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001972 }
1973
Sujithe8324352009-01-16 21:38:42 +05301974 return nbad;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001975}
1976
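/*
 * Translate the hardware TX status into the mac80211 tx_info fields
 * consumed by rate control: ACK RSSI, A-MPDU length/ack counts,
 * filtered/xretry/underrun indications and per-rate retry counts.
 */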
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001977static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301978 int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301979{
Sujitha22be222009-03-30 15:28:36 +05301980 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301981 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301982 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001983 struct ieee80211_hw *hw = bf->aphy->hw;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301984 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301985
Sujith95e4acb2009-03-13 08:56:09 +05301986 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001987 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301988
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001989 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301990 WARN_ON(tx_rateindex >= hw->max_rates);
1991
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001992 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301993 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001994 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001995 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301996
Björn Smedmanebd02282010-10-10 22:44:39 +02001997 BUG_ON(nbad > bf->bf_nframes);
1998
1999 tx_info->status.ampdu_len = bf->bf_nframes;
2000 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
2001 }
2002
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002003 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302004 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Sujith254ad0f2009-02-04 08:10:19 +05302005 if (ieee80211_is_data(hdr->frame_control)) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002006 if (ts->ts_flags &
Felix Fietkau827e69b2009-11-15 23:09:25 +01002007 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2008 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002009 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2010 (ts->ts_status & ATH9K_TXERR_FIFO))
Felix Fietkau827e69b2009-11-15 23:09:25 +01002011 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
Sujithc4288392008-11-18 09:09:30 +05302012 }
2013 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302014
Felix Fietkau545750d2009-11-23 22:21:01 +01002015 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302016 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002017 tx_info->status.rates[i].idx = -1;
2018 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302019
Felix Fietkau78c46532010-06-25 01:26:16 +02002020 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302021}
2022
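/*
 * Counterpart to the flow control in ath_tx_start(): restart a stopped
 * mac80211 queue once the number of pending frames has dropped back
 * below ATH_MAX_QDEPTH.
 */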
Sujith059d8062009-01-16 21:38:49 +05302023static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2024{
2025 int qnum;
2026
Felix Fietkau97923b12010-06-12 00:33:55 -04002027 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2028 if (qnum == -1)
2029 return;
2030
Sujith059d8062009-01-16 21:38:49 +05302031 spin_lock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002032 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07002033 if (ath_mac80211_start_queue(sc, qnum))
2034 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05302035 }
2036 spin_unlock_bh(&txq->axq_lock);
2037}
2038
Sujithc4288392008-11-18 09:09:30 +05302039static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002040{
Sujithcbe61d82009-02-09 13:27:12 +05302041 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002042 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002043 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2044 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302045 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002046 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302047 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002048 int status;
2049
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002050 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2051 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2052 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002053
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002054 for (;;) {
2055 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002056 if (list_empty(&txq->axq_q)) {
2057 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002058 spin_unlock_bh(&txq->axq_lock);
2059 break;
2060 }
2061 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2062
2063 /*
 2064		 * There is a race condition where a BH gets scheduled
 2065		 * after sw writes TxE and before hw re-loads the last
 2066		 * descriptor to get the newly chained one.
2067 * Software must keep the last DONE descriptor as a
2068 * holding descriptor - software does so by marking
2069 * it with the STALE flag.
2070 */
2071 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302072 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002073 bf_held = bf;
2074 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302075 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002076 break;
2077 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002078 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302079 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002080 }
2081 }
2082
2083 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302084 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002085
Felix Fietkau29bffa92010-03-29 20:14:23 -07002086 memset(&ts, 0, sizeof(ts));
2087 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002088 if (status == -EINPROGRESS) {
2089 spin_unlock_bh(&txq->axq_lock);
2090 break;
2091 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002092
2093 /*
 2094		 * Remove the ath_bufs of the same transmit unit from txq,
 2095		 * but leave the last descriptor behind as the holding
 2096		 * descriptor for hw.
2097 */
Sujitha119cc42009-03-30 15:28:38 +05302098 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002099 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002100 if (!list_is_singular(&lastbf->list))
2101 list_cut_position(&bf_head,
2102 &txq->axq_q, lastbf->list.prev);
2103
2104 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002105 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002106 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002107 if (bf_held)
2108 list_del(&bf_held->list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002109 spin_unlock_bh(&txq->axq_lock);
2110
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002111 if (bf_held)
2112 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002113
Sujithcd3d39a2008-08-11 14:03:34 +05302114 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002115 /*
2116 * This frame is sent out as a single frame.
2117 * Use hardware retry status for this frame.
2118 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002119 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302120 bf->bf_state.bf_type |= BUF_XRETRY;
Björn Smedmanebd02282010-10-10 22:44:39 +02002121 ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002122 }
Johannes Berge6a98542008-10-21 12:40:02 +02002123
Sujithcd3d39a2008-08-11 14:03:34 +05302124 if (bf_isampdu(bf))
Felix Fietkau29bffa92010-03-29 20:14:23 -07002125 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002126 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002127 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002128
Sujith059d8062009-01-16 21:38:49 +05302129 ath_wake_mac80211_queue(sc, txq);
2130
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002131 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302132 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002133 ath_txq_schedule(sc, txq);
2134 spin_unlock_bh(&txq->axq_lock);
2135 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002136}
2137
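/*
 * Watchdog for stuck TX queues: if a queue still holds frames and no
 * completion has been processed since the previous poll (i.e.
 * axq_tx_inprogress was left set), assume the hardware has hung and
 * reset the chip.
 */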
Sujith305fe472009-07-23 15:32:29 +05302138static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002139{
2140 struct ath_softc *sc = container_of(work, struct ath_softc,
2141 tx_complete_work.work);
2142 struct ath_txq *txq;
2143 int i;
2144 bool needreset = false;
2145
2146 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2147 if (ATH_TXQ_SETUP(sc, i)) {
2148 txq = &sc->tx.txq[i];
2149 spin_lock_bh(&txq->axq_lock);
2150 if (txq->axq_depth) {
2151 if (txq->axq_tx_inprogress) {
2152 needreset = true;
2153 spin_unlock_bh(&txq->axq_lock);
2154 break;
2155 } else {
2156 txq->axq_tx_inprogress = true;
2157 }
2158 }
2159 spin_unlock_bh(&txq->axq_lock);
2160 }
2161
2162 if (needreset) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002163 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2164 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302165 ath9k_ps_wakeup(sc);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002166 ath_reset(sc, true);
Sujith332c5562009-10-09 09:51:28 +05302167 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002168 }
2169
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002170 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002171 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2172}
2173
2174
Sujithe8324352009-01-16 21:38:42 +05302175
2176void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177{
Sujithe8324352009-01-16 21:38:42 +05302178 int i;
2179 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002180
Sujithe8324352009-01-16 21:38:42 +05302181 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002182
2183 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302184 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2185 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002186 }
2187}
2188
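/*
 * TX completion handling for EDMA (AR9003 family) hardware, which
 * reports completions through a dedicated TX status ring (set up in
 * ath_tx_edma_init() below) instead of in the frame descriptors
 * themselves.
 */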
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002189void ath_tx_edma_tasklet(struct ath_softc *sc)
2190{
2191 struct ath_tx_status txs;
2192 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2193 struct ath_hw *ah = sc->sc_ah;
2194 struct ath_txq *txq;
2195 struct ath_buf *bf, *lastbf;
2196 struct list_head bf_head;
2197 int status;
2198 int txok;
2199
2200 for (;;) {
2201 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2202 if (status == -EINPROGRESS)
2203 break;
2204 if (status == -EIO) {
2205 ath_print(common, ATH_DBG_XMIT,
2206 "Error processing tx status\n");
2207 break;
2208 }
2209
2210 /* Skip beacon completions */
2211 if (txs.qid == sc->beacon.beaconq)
2212 continue;
2213
2214 txq = &sc->tx.txq[txs.qid];
2215
2216 spin_lock_bh(&txq->axq_lock);
2217 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2218 spin_unlock_bh(&txq->axq_lock);
2219 return;
2220 }
2221
2222 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2223 struct ath_buf, list);
2224 lastbf = bf->bf_lastbf;
2225
2226 INIT_LIST_HEAD(&bf_head);
2227 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2228 &lastbf->list);
2229 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2230 txq->axq_depth--;
2231 txq->axq_tx_inprogress = false;
2232 spin_unlock_bh(&txq->axq_lock);
2233
2234 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2235
2236 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002237 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2238 bf->bf_state.bf_type |= BUF_XRETRY;
Björn Smedmanebd02282010-10-10 22:44:39 +02002239 ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002240 }
2241
2242 if (bf_isampdu(bf))
2243 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2244 else
2245 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2246 &txs, txok, 0);
2247
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002248 ath_wake_mac80211_queue(sc, txq);
2249
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002250 spin_lock_bh(&txq->axq_lock);
2251 if (!list_empty(&txq->txq_fifo_pending)) {
2252 INIT_LIST_HEAD(&bf_head);
2253 bf = list_first_entry(&txq->txq_fifo_pending,
2254 struct ath_buf, list);
2255 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2256 &bf->bf_lastbf->list);
2257 ath_tx_txqaddbuf(sc, txq, &bf_head);
2258 } else if (sc->sc_flags & SC_OP_TXAGGR)
2259 ath_txq_schedule(sc, txq);
2260 spin_unlock_bh(&txq->axq_lock);
2261 }
2262}
2263
Sujithe8324352009-01-16 21:38:42 +05302264/*****************/
2265/* Init, Cleanup */
2266/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002267
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002268static int ath_txstatus_setup(struct ath_softc *sc, int size)
2269{
2270 struct ath_descdma *dd = &sc->txsdma;
2271 u8 txs_len = sc->sc_ah->caps.txs_len;
2272
2273 dd->dd_desc_len = size * txs_len;
2274 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2275 &dd->dd_desc_paddr, GFP_KERNEL);
2276 if (!dd->dd_desc)
2277 return -ENOMEM;
2278
2279 return 0;
2280}
2281
2282static int ath_tx_edma_init(struct ath_softc *sc)
2283{
2284 int err;
2285
2286 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2287 if (!err)
2288 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2289 sc->txsdma.dd_desc_paddr,
2290 ATH_TXSTATUS_RING_SIZE);
2291
2292 return err;
2293}
2294
2295static void ath_tx_edma_cleanup(struct ath_softc *sc)
2296{
2297 struct ath_descdma *dd = &sc->txsdma;
2298
2299 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2300 dd->dd_desc_paddr);
2301}
2302
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002303int ath_tx_init(struct ath_softc *sc, int nbufs)
2304{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002305 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002306 int error = 0;
2307
Sujith797fe5c2009-03-30 15:28:45 +05302308 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002309
Sujith797fe5c2009-03-30 15:28:45 +05302310 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002311 "tx", nbufs, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302312 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002313 ath_print(common, ATH_DBG_FATAL,
2314 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302315 goto err;
2316 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002317
Sujith797fe5c2009-03-30 15:28:45 +05302318 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002319 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5c2009-03-30 15:28:45 +05302320 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002321 ath_print(common, ATH_DBG_FATAL,
2322 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5c2009-03-30 15:28:45 +05302323 goto err;
2324 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002325
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002326 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2327
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002328 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2329 error = ath_tx_edma_init(sc);
2330 if (error)
2331 goto err;
2332 }
2333
Sujith797fe5c2009-03-30 15:28:45 +05302334err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002335 if (error != 0)
2336 ath_tx_cleanup(sc);
2337
2338 return error;
2339}
2340
Sujith797fe5c2009-03-30 15:28:45 +05302341void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002342{
Sujithb77f4832008-12-07 21:44:03 +05302343 if (sc->beacon.bdma.dd_desc_len != 0)
2344 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002345
Sujithb77f4832008-12-07 21:44:03 +05302346 if (sc->tx.txdma.dd_desc_len != 0)
2347 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002348
2349 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2350 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002351}
2352
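/*
 * Initialize the per-node aggregation state: one ath_atx_tid per TID
 * and one ath_atx_ac per WME access category, each with an empty
 * software queue and no ADDBA session in progress.
 */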
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002353void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2354{
Sujithc5170162008-10-29 10:13:59 +05302355 struct ath_atx_tid *tid;
2356 struct ath_atx_ac *ac;
2357 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002358
Sujith8ee5afb2008-12-07 21:43:36 +05302359 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302360 tidno < WME_NUM_TID;
2361 tidno++, tid++) {
2362 tid->an = an;
2363 tid->tidno = tidno;
2364 tid->seq_start = tid->seq_next = 0;
2365 tid->baw_size = WME_MAX_BA;
2366 tid->baw_head = tid->baw_tail = 0;
2367 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302368 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302369 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302370 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302371 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302372 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302373 tid->state &= ~AGGR_ADDBA_COMPLETE;
2374 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302375 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376
Sujith8ee5afb2008-12-07 21:43:36 +05302377 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302378 acno < WME_NUM_AC; acno++, ac++) {
2379 ac->sched = false;
Felix Fietkau1d2231e2010-06-12 00:33:51 -04002380 ac->qnum = sc->tx.hwq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302381 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002382 }
2383}
2384
Sujithb5aa9bf2008-10-29 10:13:31 +05302385void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002386{
Felix Fietkau2b409942010-07-07 19:42:08 +02002387 struct ath_atx_ac *ac;
2388 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002389 struct ath_txq *txq;
Felix Fietkau2b409942010-07-07 19:42:08 +02002390 int i, tidno;
Sujithe8324352009-01-16 21:38:42 +05302391
Felix Fietkau2b409942010-07-07 19:42:08 +02002392 for (tidno = 0, tid = &an->tid[tidno];
2393 tidno < WME_NUM_TID; tidno++, tid++) {
2394 i = tid->ac->qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002395
Felix Fietkau2b409942010-07-07 19:42:08 +02002396 if (!ATH_TXQ_SETUP(sc, i))
2397 continue;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002398
Felix Fietkau2b409942010-07-07 19:42:08 +02002399 txq = &sc->tx.txq[i];
2400 ac = tid->ac;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002401
Felix Fietkau2b409942010-07-07 19:42:08 +02002402 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002403
Felix Fietkau2b409942010-07-07 19:42:08 +02002404 if (tid->sched) {
2405 list_del(&tid->list);
2406 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002407 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002408
2409 if (ac->sched) {
2410 list_del(&ac->list);
2411 tid->ac->sched = false;
2412 }
2413
2414 ath_tid_drain(sc, txq, tid);
2415 tid->state &= ~AGGR_ADDBA_COMPLETE;
2416 tid->state &= ~AGGR_CLEANUP;
2417
2418 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002419 }
2420}