/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

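/*
 * Worked example for the symbol-time macros above (illustrative values,
 * not taken from the driver): an HT PPDU spanning 100 OFDM symbols lasts
 * SYMBOL_TIME(100) = 400 us with the regular 0.8 us guard interval and
 * SYMBOL_TIME_HALFGI(100) = (100 * 18 + 4) / 5 = 360 us with the short
 * guard interval.  Going the other way, NUM_SYMBOLS_PER_USEC(16) = 4 and
 * NUM_SYMBOLS_PER_USEC_HALFGI(16) = ((16 * 5) - 4) / 18 = 4 symbols.
 */
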
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

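/*
 * Rough sanity check for the table above (illustrative only, not used by
 * the code): one HT symbol lasts 4 us with the long GI, so MCS 7 at 20 MHz
 * carries 260 bits / 4 us = 65 Mb/s and MCS 7 at 40 MHz carries
 * 540 bits / 4 us = 135 Mb/s, matching the single-stream rates in the
 * 802.11n MCS tables.
 */
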
#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864,  19300,  25736,  28952,  32172,
		6424, 12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628, 19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296,  21444,  28596,  32172,  35744,
		7140, 14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

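/*
 * The entries above are, roughly, the number of bytes a given MCS can move
 * in 4 ms, clamped to 65532 so the value still fits the 16-bit aggregate
 * length field.  As an illustrative check (not used by the code): MCS 0 at
 * 20 MHz runs at 6.5 Mb/s, and 6.5 Mb/s * 4 ms / 8 = 3250 bytes, close to
 * the 3212-byte entry once preamble and guard-interval overhead are
 * accounted for.
 */
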
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		BUG_ON(bf_isretried(bf));
		list_move_tail(&bf->list, &bf_head);
		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

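/*
 * Illustration of the block-ack window bookkeeping above (assumed numbers,
 * not from the code): with seq_start = 100 and baw_head = 0, completing
 * seqno 102 clears slot (0 + 2) in tx_buf but slides nothing forward,
 * because seqno 100 and 101 are still outstanding; once 100 completes, the
 * while loop in ath_tx_update_baw() advances seq_start and baw_head across
 * every contiguous completed slot.
 */
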
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 0, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

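/*
 * Illustrative numbers for the limit computed above (not from the code):
 * if the slowest rate in the series allows max_4ms_framelen = 32172 bytes,
 * the normal limit is min(32172, ATH_AMPDU_LIMIT_MAX), while BT coexistence
 * scales it down to (32172 * 3) / 8 = 12064 bytes; a per-station maxampdu
 * taken from the peer's HT capabilities can shrink it further.
 */
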
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates.
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

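/*
 * Worked example for the math above (assumed values, not from the code):
 * with an MPDU density of 8 us, full GI and a 20 MHz single-stream MCS 7
 * first rate, nsymbols = 8 / 4 = 2 and nsymbits = 260, so minlen =
 * (2 * 260) / 8 = 65 bytes; a 40-byte subframe would then need
 * (65 - 40) / ATH_AGGR_DELIM_SZ extra delimiters on top of the standard
 * count from ATH_AGGR_GET_NDELIM().
 */
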
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		       u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_tx_status ts;
	struct ath_buf *bf;
	struct list_head bf_head;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: this is based on the assumption that a
			 * software-retried frame will always stay
			 * at the head of the software queue.
			 */
			break;
		}
		list_move_tail(&bf->list, &bf_head);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}
	spin_unlock_bh(&txq->axq_lock);

	if (txtid->baw_head != txtid->baw_tail) {
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		ath_tx_flush_tid(sc, txtid);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

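/*
 * Illustrative example for the CAB queue ready time above (assumed values,
 * not from the code): with a beacon interval of 100 TU and cabqReadytime
 * clamped to 10 (percent), tqi_readyTime becomes (100 * 10) / 100 = 10,
 * i.e. the CAB queue may run for roughly a tenth of the beacon period
 * after each DTIM beacon.
 */
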
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by the caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override the seqno set by the upper layer with the one
	 * in the tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (use_ldpc)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001458/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001459 * rix - rate index
1460 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1461 * width - 0 for 20 MHz, 1 for 40 MHz
 1462 * half_gi - to use 4 us vs 3.6 us for symbol time
1463 */
Sujith102e0572008-10-29 10:15:16 +05301464static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1465 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001466{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001467 u32 nbits, nsymbits, duration, nsymbols;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001468 int streams, pktlen;
1469
Sujithcd3d39a2008-08-11 14:03:34 +05301470 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +05301471
1472 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001473 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001474 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001475 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001476 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1477
1478 if (!half_gi)
1479 duration = SYMBOL_TIME(nsymbols);
1480 else
1481 duration = SYMBOL_TIME_HALFGI(nsymbols);
1482
Sujithe63835b2008-11-18 09:07:53 +05301483	/* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001484 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301485
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001486 return duration;
1487}
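/*
 * Editor's illustration, not part of the original source -- a hedged worked
 * example of the calculation above, with assumed values. For a single-stream
 * MCS rate on a 20 MHz channel with long GI, suppose nbits comes to 9622 and
 * nsymbits to 260; then nsymbols = (9622 + 260 - 1) / 260 = 38, and assuming
 * SYMBOL_TIME() expands to 4 us per symbol that is 152 us of data airtime,
 * before the legacy/HT training and signal fields are added in the last
 * statement of the function.
 */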
1488
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001489static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1490{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001491 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001492 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301493 struct sk_buff *skb;
1494 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301495 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001496 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301497 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301498 int i, flags = 0;
1499 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301500 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301501
1502 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301503
Sujitha22be222009-03-30 15:28:36 +05301504 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301505 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301506 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301507 hdr = (struct ieee80211_hdr *)skb->data;
1508 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301509
Sujithc89424d2009-01-30 14:29:28 +05301510 /*
1511 * We check if Short Preamble is needed for the CTS rate by
1512 * checking the BSS's global flag.
1513 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1514 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001515 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1516 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301517 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001518 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001519
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001520 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001521 bool is_40, is_sgi, is_sp;
1522 int phy;
1523
Sujithe63835b2008-11-18 09:07:53 +05301524 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001525 continue;
1526
Sujitha8efee42008-11-18 09:07:30 +05301527 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301528 series[i].Tries = rates[i].count;
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001529 series[i].ChSel = common->tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001530
Felix Fietkau27032052010-01-17 21:08:50 +01001531 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1532 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301533 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001534 flags |= ATH9K_TXDESC_RTSENA;
1535 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1536 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1537 flags |= ATH9K_TXDESC_CTSENA;
1538 }
1539
Sujithc89424d2009-01-30 14:29:28 +05301540 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1541 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1542 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1543 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001544
Felix Fietkau545750d2009-11-23 22:21:01 +01001545 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1546 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1547 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1548
1549 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1550 /* MCS rates */
1551 series[i].Rate = rix | 0x80;
1552 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1553 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001554 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1555 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001556 continue;
1557 }
1558
 1559		/* legacy rates */
1560 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1561 !(rate->flags & IEEE80211_RATE_ERP_G))
1562 phy = WLAN_RC_PHY_CCK;
1563 else
1564 phy = WLAN_RC_PHY_OFDM;
1565
1566 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1567 series[i].Rate = rate->hw_value;
1568 if (rate->hw_value_short) {
1569 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1570 series[i].Rate |= rate->hw_value_short;
1571 } else {
1572 is_sp = false;
1573 }
1574
1575 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1576 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001577 }
1578
Felix Fietkau27032052010-01-17 21:08:50 +01001579 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1580 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1581 flags &= ~ATH9K_TXDESC_RTSENA;
1582
1583 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1584 if (flags & ATH9K_TXDESC_RTSENA)
1585 flags &= ~ATH9K_TXDESC_CTSENA;
1586
Sujithe63835b2008-11-18 09:07:53 +05301587 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301588 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1589 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301590 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301591 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301592
Sujith17d79042009-02-09 13:27:03 +05301593 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301594 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001595}
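/*
 * Editor's note (a hedged summary, not in the original file): the four
 * series[] entries filled in above are the hardware's multi-rate retry
 * chain -- one entry per mac80211 rate control attempt in rates[0..3],
 * each tried series[i].Tries times before falling back to the next --
 * and ath9k_hw_set11n_ratescenario() writes that chain into the frame's
 * descriptors via bf->bf_desc and bf->bf_lastbf->bf_desc.
 */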
1596
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001597static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
Sujithe8324352009-01-16 21:38:42 +05301598 struct sk_buff *skb,
1599 struct ath_tx_control *txctl)
1600{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001601 struct ath_wiphy *aphy = hw->priv;
1602 struct ath_softc *sc = aphy->sc;
Sujithe8324352009-01-16 21:38:42 +05301603 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1604 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301605 int hdrlen;
1606 __le16 fc;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001607 int padpos, padsize;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001608 bool use_ldpc = false;
Sujithe8324352009-01-16 21:38:42 +05301609
Felix Fietkau827e69b2009-11-15 23:09:25 +01001610 tx_info->pad[0] = 0;
1611 switch (txctl->frame_type) {
Pavel Roskinc81494d2010-03-31 18:05:25 -04001612 case ATH9K_IFT_NOT_INTERNAL:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001613 break;
Pavel Roskinc81494d2010-03-31 18:05:25 -04001614 case ATH9K_IFT_PAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001615 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1616 /* fall through */
Pavel Roskinc81494d2010-03-31 18:05:25 -04001617 case ATH9K_IFT_UNPAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001618 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1619 break;
1620 }
Sujithe8324352009-01-16 21:38:42 +05301621 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1622 fc = hdr->frame_control;
1623
1624 ATH_TXBUF_RESET(bf);
1625
Felix Fietkau827e69b2009-11-15 23:09:25 +01001626 bf->aphy = aphy;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001627 bf->bf_frmlen = skb->len + FCS_LEN;
1628 /* Remove the padding size from bf_frmlen, if any */
1629 padpos = ath9k_cmn_padpos(hdr->frame_control);
1630 padsize = padpos & 3;
 1631	if (padsize && skb->len > padpos + padsize) {
1632 bf->bf_frmlen -= padsize;
1633 }
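	/*
	 * Editor's illustration of the padding arithmetic above (hedged,
	 * assuming ath9k_cmn_padpos() returns the 802.11 header length):
	 * for a QoS data frame the header is 26 bytes, so padpos = 26 and
	 * padsize = 26 & 3 = 2, i.e. two pad bytes sit between header and
	 * payload and are excluded from bf_frmlen by the block above.
	 */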
Sujithe8324352009-01-16 21:38:42 +05301634
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001635 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
Sujithc656bbb2009-01-16 21:38:56 +05301636 bf->bf_state.bf_type |= BUF_HT;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001637 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1638 use_ldpc = true;
1639 }
Sujithe8324352009-01-16 21:38:42 +05301640
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001641 bf->bf_state.bfs_paprd = txctl->paprd;
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001642 if (txctl->paprd)
1643 bf->bf_state.bfs_paprd_timestamp = jiffies;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001644 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
Sujithe8324352009-01-16 21:38:42 +05301645
Luis R. Rodriguezc17512d2010-08-05 17:56:54 -04001646 bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujithe8324352009-01-16 21:38:42 +05301647 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1648 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1649 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1650 } else {
1651 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1652 }
1653
Sujith17b182e2009-12-14 14:56:56 +05301654 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1655 (sc->sc_flags & SC_OP_TXAGGR))
Sujithe8324352009-01-16 21:38:42 +05301656 assign_aggr_tid_seqno(skb, bf);
1657
1658 bf->bf_mpdu = skb;
1659
1660 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
1661 skb->len, DMA_TO_DEVICE);
1662 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
1663 bf->bf_mpdu = NULL;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001664 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1665 "dma_mapping_error() on TX\n");
Sujithe8324352009-01-16 21:38:42 +05301666 return -ENOMEM;
1667 }
1668
1669 bf->bf_buf_addr = bf->bf_dmacontext;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001670
1671 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1672 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1673 bf->bf_isnullfunc = true;
Sujith1b04b932010-01-08 10:36:05 +05301674 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001675 } else
1676 bf->bf_isnullfunc = false;
1677
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001678 bf->bf_tx_aborted = false;
1679
Sujithe8324352009-01-16 21:38:42 +05301680 return 0;
1681}
1682
1683/* FIXME: tx power */
1684static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1685 struct ath_tx_control *txctl)
1686{
Sujitha22be222009-03-30 15:28:36 +05301687 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301688 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithc37452b2009-03-09 09:31:57 +05301689 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301690 struct ath_node *an = NULL;
1691 struct list_head bf_head;
1692 struct ath_desc *ds;
1693 struct ath_atx_tid *tid;
Sujithcbe61d82009-02-09 13:27:12 +05301694 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301695 int frm_type;
Sujithc37452b2009-03-09 09:31:57 +05301696 __le16 fc;
Sujithe8324352009-01-16 21:38:42 +05301697
1698 frm_type = get_hw_packet_type(skb);
Sujithc37452b2009-03-09 09:31:57 +05301699 fc = hdr->frame_control;
Sujithe8324352009-01-16 21:38:42 +05301700
1701 INIT_LIST_HEAD(&bf_head);
1702 list_add_tail(&bf->list, &bf_head);
1703
1704 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001705 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301706
1707 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1708 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1709
1710 ath9k_hw_filltxdesc(ah, ds,
1711 skb->len, /* segment length */
1712 true, /* first segment */
1713 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001714 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001715 bf->bf_buf_addr,
1716 txctl->txq->axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301717
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001718 if (bf->bf_state.bfs_paprd)
1719 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1720
Sujithe8324352009-01-16 21:38:42 +05301721 spin_lock_bh(&txctl->txq->axq_lock);
1722
1723 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1724 tx_info->control.sta) {
1725 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1726 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1727
Sujithc37452b2009-03-09 09:31:57 +05301728 if (!ieee80211_is_data_qos(fc)) {
1729 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1730 goto tx_done;
1731 }
1732
Felix Fietkau4fdec032010-03-12 04:02:43 +01001733 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Sujithe8324352009-01-16 21:38:42 +05301734 /*
1735 * Try aggregation if it's a unicast data frame
1736 * and the destination is HT capable.
1737 */
1738 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1739 } else {
1740 /*
1741 * Send this frame as regular when ADDBA
1742 * exchange is neither complete nor pending.
1743 */
Sujithc37452b2009-03-09 09:31:57 +05301744 ath_tx_send_ht_normal(sc, txctl->txq,
1745 tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301746 }
1747 } else {
Sujithc37452b2009-03-09 09:31:57 +05301748 ath_tx_send_normal(sc, txctl->txq, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301749 }
1750
Sujithc37452b2009-03-09 09:31:57 +05301751tx_done:
Sujithe8324352009-01-16 21:38:42 +05301752 spin_unlock_bh(&txctl->txq->axq_lock);
1753}
1754
1755/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001756int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301757 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001758{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001759 struct ath_wiphy *aphy = hw->priv;
1760 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001761 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau84642d62010-06-01 21:33:13 +02001762 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001763 struct ath_buf *bf;
Felix Fietkau97923b12010-06-12 00:33:55 -04001764 int q, r;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001765
Sujithe8324352009-01-16 21:38:42 +05301766 bf = ath_tx_get_buffer(sc);
1767 if (!bf) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001768 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
Sujithe8324352009-01-16 21:38:42 +05301769 return -1;
1770 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001771
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001772 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301773 if (unlikely(r)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001774 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001775
Sujithe8324352009-01-16 21:38:42 +05301776 /* upon ath_tx_processq() this TX queue will be resumed, we
1777 * guarantee this will happen by knowing beforehand that
 1778		 * we will at least have to run TX completion on one buffer
1779 * on the queue */
1780 spin_lock_bh(&txq->axq_lock);
Felix Fietkau84642d62010-06-01 21:33:13 +02001781 if (!txq->stopped && txq->axq_depth > 1) {
Luis R. Rodriguezf52de032009-11-02 17:09:12 -08001782 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
Sujithe8324352009-01-16 21:38:42 +05301783 txq->stopped = 1;
1784 }
1785 spin_unlock_bh(&txq->axq_lock);
1786
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001787 ath_tx_return_buffer(sc, bf);
Sujithe8324352009-01-16 21:38:42 +05301788
1789 return r;
1790 }
1791
Felix Fietkau97923b12010-06-12 00:33:55 -04001792 q = skb_get_queue_mapping(skb);
1793 if (q >= 4)
1794 q = 0;
1795
1796 spin_lock_bh(&txq->axq_lock);
1797 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1798 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1799 txq->stopped = 1;
1800 }
1801 spin_unlock_bh(&txq->axq_lock);
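	/*
	 * Editor's note (hedged): the locked block above is per-queue flow
	 * control -- pending_frames[q] counts frames handed to the driver
	 * for mac80211 queue q (clamped to 0..3), and once it exceeds
	 * ATH_MAX_QDEPTH the mac80211 queue is stopped; the completion path
	 * decrements the counter and ath_wake_mac80211_queue() restarts the
	 * queue when the count drops back below the threshold.
	 */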
1802
Sujithe8324352009-01-16 21:38:42 +05301803 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001804
1805 return 0;
1806}
1807
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001808void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001809{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001810 struct ath_wiphy *aphy = hw->priv;
1811 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001812 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001813 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1814 int padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301815 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1816 struct ath_tx_control txctl;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001817
Sujithe8324352009-01-16 21:38:42 +05301818 memset(&txctl, 0, sizeof(struct ath_tx_control));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001819
Sujithe8324352009-01-16 21:38:42 +05301820 /*
1821 * As a temporary workaround, assign seq# here; this will likely need
1822 * to be cleaned up to work better with Beacon transmission and virtual
1823 * BSSes.
1824 */
1825 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
Sujithe8324352009-01-16 21:38:42 +05301826 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1827 sc->tx.seq_no += 0x10;
1828 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1829 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001830 }
1831
Sujithe8324352009-01-16 21:38:42 +05301832 /* Add the padding after the header if this is not already done */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001833 padpos = ath9k_cmn_padpos(hdr->frame_control);
1834 padsize = padpos & 3;
 1835	if (padsize && skb->len > padpos) {
Sujithe8324352009-01-16 21:38:42 +05301836 if (skb_headroom(skb) < padsize) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001837 ath_print(common, ATH_DBG_XMIT,
1838 "TX CABQ padding failed\n");
Sujithe8324352009-01-16 21:38:42 +05301839 dev_kfree_skb_any(skb);
1840 return;
1841 }
1842 skb_push(skb, padsize);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001843 memmove(skb->data, skb->data + padsize, padpos);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001844 }
1845
Sujithe8324352009-01-16 21:38:42 +05301846 txctl.txq = sc->beacon.cabq;
1847
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001848 ath_print(common, ATH_DBG_XMIT,
1849 "transmitting CABQ packet, skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301850
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001851 if (ath_tx_start(hw, skb, &txctl) != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001852 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
Sujithe8324352009-01-16 21:38:42 +05301853 goto exit;
1854 }
1855
1856 return;
1857exit:
1858 dev_kfree_skb_any(skb);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001859}
1860
Sujithe8324352009-01-16 21:38:42 +05301861/*****************/
1862/* TX Completion */
1863/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001864
Sujithe8324352009-01-16 21:38:42 +05301865static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau827e69b2009-11-15 23:09:25 +01001866 struct ath_wiphy *aphy, int tx_flags)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001867{
Sujithe8324352009-01-16 21:38:42 +05301868 struct ieee80211_hw *hw = sc->hw;
1869 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001870 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001871	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001872 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301873
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001874 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301875
Felix Fietkau827e69b2009-11-15 23:09:25 +01001876 if (aphy)
1877 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301878
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301879 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301880 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301881
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301882 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301883 /* Frame was ACKed */
1884 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1885 }
1886
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001887 padpos = ath9k_cmn_padpos(hdr->frame_control);
1888 padsize = padpos & 3;
 1889	if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301890 /*
1891 * Remove MAC header padding before giving the frame back to
1892 * mac80211.
1893 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001894 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301895 skb_pull(skb, padsize);
1896 }
1897
Sujith1b04b932010-01-08 10:36:05 +05301898 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1899 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001900 ath_print(common, ATH_DBG_PS,
1901 "Going back to sleep after having "
Pavel Roskinf643e512010-01-29 17:22:12 -05001902 "received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301903 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1904 PS_WAIT_FOR_CAB |
1905 PS_WAIT_FOR_PSPOLL_DATA |
1906 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001907 }
1908
Felix Fietkau827e69b2009-11-15 23:09:25 +01001909 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
Jouni Malinenf0ed85c2009-03-03 19:23:31 +02001910 ath9k_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001911 else {
1912 q = skb_get_queue_mapping(skb);
1913 if (q >= 4)
1914 q = 0;
1915
1916 if (--sc->tx.pending_frames[q] < 0)
1917 sc->tx.pending_frames[q] = 0;
1918
Felix Fietkau827e69b2009-11-15 23:09:25 +01001919 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001920 }
Sujithe8324352009-01-16 21:38:42 +05301921}
1922
1923static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001924 struct ath_txq *txq, struct list_head *bf_q,
1925 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301926{
1927 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301928 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301929 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301930
Sujithe8324352009-01-16 21:38:42 +05301931 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301932 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301933
1934 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301935 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301936
1937 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301938 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301939 }
1940
1941 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001942
1943 if (bf->bf_state.bfs_paprd) {
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001944 if (time_after(jiffies,
1945 bf->bf_state.bfs_paprd_timestamp +
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001946 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001947 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001948 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001949 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001950 } else {
1951 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1952 ath_debug_stat_tx(sc, txq, bf, ts);
1953 }
Sujithe8324352009-01-16 21:38:42 +05301954
1955 /*
 1956	 * Return the list of ath_buf of this mpdu to the free queue
1957 */
1958 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1959 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1960 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1961}
1962
1963static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001964 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301965{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001966 u16 seq_st = 0;
1967 u32 ba[WME_BA_BMP_SIZE >> 5];
Sujithe8324352009-01-16 21:38:42 +05301968 int ba_index;
1969 int nbad = 0;
1970 int isaggr = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001971
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001972 if (bf->bf_lastbf->bf_tx_aborted)
Sujithe8324352009-01-16 21:38:42 +05301973 return 0;
Sujith528f0c62008-10-29 10:14:26 +05301974
Sujithcd3d39a2008-08-11 14:03:34 +05301975 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001976 if (isaggr) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001977 seq_st = ts->ts_seqnum;
1978 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001979 }
1980
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001981 while (bf) {
Sujithe8324352009-01-16 21:38:42 +05301982 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1983 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1984 nbad++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001985
Sujithe8324352009-01-16 21:38:42 +05301986 bf = bf->bf_next;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001987 }
1988
Sujithe8324352009-01-16 21:38:42 +05301989 return nbad;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001990}
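/*
 * Editor's illustration (hedged, values assumed): in the aggregate case
 * above, seq_st is the block-ack starting sequence number and ba[] the
 * block-ack bitmap reported by hardware. If seq_st were 100 and a
 * subframe's bf_seqno 103, ATH_BA_INDEX() would presumably yield 3, and
 * ATH_BA_ISSET(ba, 3) decides whether that subframe counts towards nbad,
 * which ath_tx_rc_status() later uses to report ampdu_ack_len to mac80211.
 */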
1991
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001992static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301993 int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301994{
Sujitha22be222009-03-30 15:28:36 +05301995 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301996 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301997 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001998 struct ieee80211_hw *hw = bf->aphy->hw;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301999 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302000
Sujith95e4acb2009-03-13 08:56:09 +05302001 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002002 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302003
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002004 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302005 WARN_ON(tx_rateindex >= hw->max_rates);
2006
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002007 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302008 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Felix Fietkaud9698472010-03-01 13:32:11 +01002009 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
2010 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302011
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002012 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302013 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Sujith254ad0f2009-02-04 08:10:19 +05302014 if (ieee80211_is_data(hdr->frame_control)) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002015 if (ts->ts_flags &
Felix Fietkau827e69b2009-11-15 23:09:25 +01002016 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2017 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002018 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2019 (ts->ts_status & ATH9K_TXERR_FIFO))
Felix Fietkau827e69b2009-11-15 23:09:25 +01002020 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2021 tx_info->status.ampdu_len = bf->bf_nframes;
2022 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
Sujithc4288392008-11-18 09:09:30 +05302023 }
2024 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302025
Felix Fietkau545750d2009-11-23 22:21:01 +01002026 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302027 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002028 tx_info->status.rates[i].idx = -1;
2029 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302030
Felix Fietkau78c46532010-06-25 01:26:16 +02002031 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302032}
2033
Sujith059d8062009-01-16 21:38:49 +05302034static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2035{
2036 int qnum;
2037
Felix Fietkau97923b12010-06-12 00:33:55 -04002038 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2039 if (qnum == -1)
2040 return;
2041
Sujith059d8062009-01-16 21:38:49 +05302042 spin_lock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002043 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07002044 if (ath_mac80211_start_queue(sc, qnum))
2045 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05302046 }
2047 spin_unlock_bh(&txq->axq_lock);
2048}
2049
Sujithc4288392008-11-18 09:09:30 +05302050static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002051{
Sujithcbe61d82009-02-09 13:27:12 +05302052 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002053 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002054 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2055 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302056 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002057 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302058 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002059 int status;
2060
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002061 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2062 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2063 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002064
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002065 for (;;) {
2066 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002067 if (list_empty(&txq->axq_q)) {
2068 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002069 spin_unlock_bh(&txq->axq_lock);
2070 break;
2071 }
2072 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2073
2074 /*
 2075		 * There is a race condition where a BH gets scheduled
 2076		 * after sw writes TxE and before hw re-loads the last
 2077		 * descriptor to get the newly chained one.
2078 * Software must keep the last DONE descriptor as a
2079 * holding descriptor - software does so by marking
2080 * it with the STALE flag.
2081 */
2082 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302083 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002084 bf_held = bf;
2085 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302086 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002087 break;
2088 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002089 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302090 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002091 }
2092 }
2093
2094 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302095 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002096
Felix Fietkau29bffa92010-03-29 20:14:23 -07002097 memset(&ts, 0, sizeof(ts));
2098 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002099 if (status == -EINPROGRESS) {
2100 spin_unlock_bh(&txq->axq_lock);
2101 break;
2102 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002103
2104 /*
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002105 * We now know the nullfunc frame has been ACKed so we
2106 * can disable RX.
2107 */
2108 if (bf->bf_isnullfunc &&
Felix Fietkau29bffa92010-03-29 20:14:23 -07002109 (ts.ts_status & ATH9K_TX_ACKED)) {
Senthil Balasubramanian3f7c5c12010-02-03 22:51:13 +05302110 if ((sc->ps_flags & PS_ENABLED))
2111 ath9k_enable_ps(sc);
2112 else
Sujith1b04b932010-01-08 10:36:05 +05302113 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002114 }
2115
2116 /*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002117 * Remove ath_buf's of the same transmit unit from txq,
 2118		 * but leave the last descriptor behind as the holding
 2119		 * descriptor for hw.
2120 */
Sujitha119cc42009-03-30 15:28:38 +05302121 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002122 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002123 if (!list_is_singular(&lastbf->list))
2124 list_cut_position(&bf_head,
2125 &txq->axq_q, lastbf->list.prev);
2126
2127 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002128 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002129 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002130 if (bf_held)
2131 list_del(&bf_held->list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002132 spin_unlock_bh(&txq->axq_lock);
2133
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002134 if (bf_held)
2135 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002136
Sujithcd3d39a2008-08-11 14:03:34 +05302137 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002138 /*
2139 * This frame is sent out as a single frame.
2140 * Use hardware retry status for this frame.
2141 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002142 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302143 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002144 ath_tx_rc_status(bf, &ts, 0, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002145 }
Johannes Berge6a98542008-10-21 12:40:02 +02002146
Sujithcd3d39a2008-08-11 14:03:34 +05302147 if (bf_isampdu(bf))
Felix Fietkau29bffa92010-03-29 20:14:23 -07002148 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002149 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002150 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002151
Sujith059d8062009-01-16 21:38:49 +05302152 ath_wake_mac80211_queue(sc, txq);
2153
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002154 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302155 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002156 ath_txq_schedule(sc, txq);
2157 spin_unlock_bh(&txq->axq_lock);
2158 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002159}
2160
Sujith305fe472009-07-23 15:32:29 +05302161static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002162{
2163 struct ath_softc *sc = container_of(work, struct ath_softc,
2164 tx_complete_work.work);
2165 struct ath_txq *txq;
2166 int i;
2167 bool needreset = false;
2168
2169 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2170 if (ATH_TXQ_SETUP(sc, i)) {
2171 txq = &sc->tx.txq[i];
2172 spin_lock_bh(&txq->axq_lock);
2173 if (txq->axq_depth) {
2174 if (txq->axq_tx_inprogress) {
2175 needreset = true;
2176 spin_unlock_bh(&txq->axq_lock);
2177 break;
2178 } else {
2179 txq->axq_tx_inprogress = true;
2180 }
2181 }
2182 spin_unlock_bh(&txq->axq_lock);
2183 }
2184
2185 if (needreset) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002186 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2187 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302188 ath9k_ps_wakeup(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002189 ath_reset(sc, false);
Sujith332c5562009-10-09 09:51:28 +05302190 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002191 }
2192
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002193 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002194 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2195}
2196
2197
Sujithe8324352009-01-16 21:38:42 +05302198
2199void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002200{
Sujithe8324352009-01-16 21:38:42 +05302201 int i;
2202 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002203
Sujithe8324352009-01-16 21:38:42 +05302204 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002205
2206 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302207 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2208 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002209 }
2210}
2211
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002212void ath_tx_edma_tasklet(struct ath_softc *sc)
2213{
2214 struct ath_tx_status txs;
2215 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2216 struct ath_hw *ah = sc->sc_ah;
2217 struct ath_txq *txq;
2218 struct ath_buf *bf, *lastbf;
2219 struct list_head bf_head;
2220 int status;
2221 int txok;
2222
2223 for (;;) {
2224 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2225 if (status == -EINPROGRESS)
2226 break;
2227 if (status == -EIO) {
2228 ath_print(common, ATH_DBG_XMIT,
2229 "Error processing tx status\n");
2230 break;
2231 }
2232
2233 /* Skip beacon completions */
2234 if (txs.qid == sc->beacon.beaconq)
2235 continue;
2236
2237 txq = &sc->tx.txq[txs.qid];
2238
2239 spin_lock_bh(&txq->axq_lock);
2240 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2241 spin_unlock_bh(&txq->axq_lock);
2242 return;
2243 }
2244
2245 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2246 struct ath_buf, list);
2247 lastbf = bf->bf_lastbf;
2248
2249 INIT_LIST_HEAD(&bf_head);
2250 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2251 &lastbf->list);
2252 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2253 txq->axq_depth--;
2254 txq->axq_tx_inprogress = false;
2255 spin_unlock_bh(&txq->axq_lock);
2256
2257 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2258
Vasanthakumar Thiagarajande0f6482010-05-17 18:57:54 -07002259 /*
 2260		 * Make sure the nullfunc frame is ACKed before configuring
 2261		 * hw into PS mode.
2262 */
2263 if (bf->bf_isnullfunc && txok) {
2264 if ((sc->ps_flags & PS_ENABLED))
2265 ath9k_enable_ps(sc);
2266 else
2267 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2268 }
2269
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002270 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002271 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2272 bf->bf_state.bf_type |= BUF_XRETRY;
2273 ath_tx_rc_status(bf, &txs, 0, txok, true);
2274 }
2275
2276 if (bf_isampdu(bf))
2277 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2278 else
2279 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2280 &txs, txok, 0);
2281
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002282 ath_wake_mac80211_queue(sc, txq);
2283
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002284 spin_lock_bh(&txq->axq_lock);
2285 if (!list_empty(&txq->txq_fifo_pending)) {
2286 INIT_LIST_HEAD(&bf_head);
2287 bf = list_first_entry(&txq->txq_fifo_pending,
2288 struct ath_buf, list);
2289 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2290 &bf->bf_lastbf->list);
2291 ath_tx_txqaddbuf(sc, txq, &bf_head);
2292 } else if (sc->sc_flags & SC_OP_TXAGGR)
2293 ath_txq_schedule(sc, txq);
2294 spin_unlock_bh(&txq->axq_lock);
2295 }
2296}
2297
Sujithe8324352009-01-16 21:38:42 +05302298/*****************/
2299/* Init, Cleanup */
2300/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002301
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002302static int ath_txstatus_setup(struct ath_softc *sc, int size)
2303{
2304 struct ath_descdma *dd = &sc->txsdma;
2305 u8 txs_len = sc->sc_ah->caps.txs_len;
2306
2307 dd->dd_desc_len = size * txs_len;
2308 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2309 &dd->dd_desc_paddr, GFP_KERNEL);
2310 if (!dd->dd_desc)
2311 return -ENOMEM;
2312
2313 return 0;
2314}
2315
2316static int ath_tx_edma_init(struct ath_softc *sc)
2317{
2318 int err;
2319
2320 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2321 if (!err)
2322 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2323 sc->txsdma.dd_desc_paddr,
2324 ATH_TXSTATUS_RING_SIZE);
2325
2326 return err;
2327}
2328
2329static void ath_tx_edma_cleanup(struct ath_softc *sc)
2330{
2331 struct ath_descdma *dd = &sc->txsdma;
2332
2333 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2334 dd->dd_desc_paddr);
2335}
2336
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002337int ath_tx_init(struct ath_softc *sc, int nbufs)
2338{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002339 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002340 int error = 0;
2341
Sujith797fe5cb2009-03-30 15:28:45 +05302342 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002343
Sujith797fe5cb2009-03-30 15:28:45 +05302344 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002345 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302346 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002347 ath_print(common, ATH_DBG_FATAL,
2348 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302349 goto err;
2350 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002351
Sujith797fe5cb2009-03-30 15:28:45 +05302352 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002353 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302354 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002355 ath_print(common, ATH_DBG_FATAL,
2356 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302357 goto err;
2358 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002359
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002360 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2361
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002362 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2363 error = ath_tx_edma_init(sc);
2364 if (error)
2365 goto err;
2366 }
2367
Sujith797fe5cb2009-03-30 15:28:45 +05302368err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002369 if (error != 0)
2370 ath_tx_cleanup(sc);
2371
2372 return error;
2373}
2374
Sujith797fe5cb2009-03-30 15:28:45 +05302375void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376{
Sujithb77f4832008-12-07 21:44:03 +05302377 if (sc->beacon.bdma.dd_desc_len != 0)
2378 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002379
Sujithb77f4832008-12-07 21:44:03 +05302380 if (sc->tx.txdma.dd_desc_len != 0)
2381 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002382
2383 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2384 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002385}
2386
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002387void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2388{
Sujithc5170162008-10-29 10:13:59 +05302389 struct ath_atx_tid *tid;
2390 struct ath_atx_ac *ac;
2391 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002392
Sujith8ee5afb2008-12-07 21:43:36 +05302393 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302394 tidno < WME_NUM_TID;
2395 tidno++, tid++) {
2396 tid->an = an;
2397 tid->tidno = tidno;
2398 tid->seq_start = tid->seq_next = 0;
2399 tid->baw_size = WME_MAX_BA;
2400 tid->baw_head = tid->baw_tail = 0;
2401 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302402 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302403 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302404 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302405 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302406 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302407 tid->state &= ~AGGR_ADDBA_COMPLETE;
2408 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302409 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002410
Sujith8ee5afb2008-12-07 21:43:36 +05302411 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302412 acno < WME_NUM_AC; acno++, ac++) {
2413 ac->sched = false;
Felix Fietkau1d2231e2010-06-12 00:33:51 -04002414 ac->qnum = sc->tx.hwq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302415 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002416 }
2417}
2418
Sujithb5aa9bf2008-10-29 10:13:31 +05302419void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002420{
Felix Fietkau2b409942010-07-07 19:42:08 +02002421 struct ath_atx_ac *ac;
2422 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002423 struct ath_txq *txq;
Felix Fietkau2b409942010-07-07 19:42:08 +02002424 int i, tidno;
Sujithe8324352009-01-16 21:38:42 +05302425
Felix Fietkau2b409942010-07-07 19:42:08 +02002426 for (tidno = 0, tid = &an->tid[tidno];
2427 tidno < WME_NUM_TID; tidno++, tid++) {
2428 i = tid->ac->qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002429
Felix Fietkau2b409942010-07-07 19:42:08 +02002430 if (!ATH_TXQ_SETUP(sc, i))
2431 continue;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002432
Felix Fietkau2b409942010-07-07 19:42:08 +02002433 txq = &sc->tx.txq[i];
2434 ac = tid->ac;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002435
Felix Fietkau2b409942010-07-07 19:42:08 +02002436 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002437
Felix Fietkau2b409942010-07-07 19:42:08 +02002438 if (tid->sched) {
2439 list_del(&tid->list);
2440 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002441 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002442
2443 if (ac->sched) {
2444 list_del(&ac->list);
2445 tid->ac->sched = false;
2446 }
2447
2448 ath_tid_drain(sc, txq, tid);
2449 tid->state &= ~AGGR_ADDBA_COMPLETE;
2450 tid->state &= ~AGGR_CLEANUP;
2451
2452 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002453 }
2454}