blob: 8785ec3b1cb99f468b475c73d56784923329e202 [file] [log] [blame]
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001/*
Sujithcee075a2009-03-13 09:07:23 +05302 * Copyright (c) 2008-2009 Atheros Communications Inc.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
Sujith394cf0a2009-02-09 13:26:54 +053017#include "ath9k.h"
Luis R. Rodriguezb622a722010-04-15 17:39:28 -040018#include "ar9003_mac.h"
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070019
20#define BITS_PER_BYTE 8
21#define OFDM_PLCP_BITS 22
Felix Fietkau7817e4c2010-04-19 19:57:31 +020022#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070023#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
24#define L_STF 8
25#define L_LTF 8
26#define L_SIG 4
27#define HT_SIG 8
28#define HT_STF 4
29#define HT_LTF(_ns) (4 * (_ns))
30#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
31#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
32#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
33#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
34
35#define OFDM_SIFS_TIME 16
36
Felix Fietkauc6663872010-04-19 19:57:33 +020037static u16 bits_per_symbol[][2] = {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070038 /* 20MHz 40MHz */
39 { 26, 54 }, /* 0: BPSK */
40 { 52, 108 }, /* 1: QPSK 1/2 */
41 { 78, 162 }, /* 2: QPSK 3/4 */
42 { 104, 216 }, /* 3: 16-QAM 1/2 */
43 { 156, 324 }, /* 4: 16-QAM 3/4 */
44 { 208, 432 }, /* 5: 64-QAM 2/3 */
45 { 234, 486 }, /* 6: 64-QAM 3/4 */
46 { 260, 540 }, /* 7: 64-QAM 5/6 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070047};
48
49#define IS_HT_RATE(_rate) ((_rate) & 0x80)
50
Sujithc37452b2009-03-09 09:31:57 +053051static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
52 struct ath_atx_tid *tid,
53 struct list_head *bf_head);
Sujithe8324352009-01-16 21:38:42 +053054static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -070055 struct ath_txq *txq, struct list_head *bf_q,
56 struct ath_tx_status *ts, int txok, int sendbar);
Sujithe8324352009-01-16 21:38:42 +053057static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58 struct list_head *head);
59static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +053060static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -070061 struct ath_tx_status *ts, int txok);
62static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +053063 int nbad, int txok, bool update_rc);
Felix Fietkau90fa5392010-09-20 13:45:38 +020064static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
65 int seqno);
Sujithe8324352009-01-16 21:38:42 +053066
/*
 * Row index into ath_max_4ms_framelen[]: one row per HT channel
 * width / guard interval combination.
 *
 * Ordering is significant: each short-GI variant must immediately
 * follow its full-GI counterpart, because the rate lookup selects
 * MCS_HT20/MCS_HT40 and then simply increments the index when the
 * short-GI rate flag is set.
 */
enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};
73
Felix Fietkau0e668cd2010-04-19 19:57:32 +020074static int ath_max_4ms_framelen[4][32] = {
75 [MCS_HT20] = {
76 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
77 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
78 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
79 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
80 },
81 [MCS_HT20_SGI] = {
82 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
83 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
84 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
85 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010086 },
87 [MCS_HT40] = {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020088 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
89 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
90 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
91 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010092 },
93 [MCS_HT40_SGI] = {
Felix Fietkau0e668cd2010-04-19 19:57:32 +020094 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
95 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
96 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
97 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
Felix Fietkau545750d2009-11-23 22:21:01 +010098 }
99};
100
Sujithe8324352009-01-16 21:38:42 +0530101/*********************/
102/* Aggregation logic */
103/*********************/
104
Sujithe8324352009-01-16 21:38:42 +0530105static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
106{
107 struct ath_atx_ac *ac = tid->ac;
108
109 if (tid->paused)
110 return;
111
112 if (tid->sched)
113 return;
114
115 tid->sched = true;
116 list_add_tail(&tid->list, &ac->tid_q);
117
118 if (ac->sched)
119 return;
120
121 ac->sched = true;
122 list_add_tail(&ac->list, &txq->axq_acq);
123}
124
/*
 * Unpause a TID and, if it has frames queued, put it back on the txq
 * scheduler and kick transmission.
 *
 * The TID must currently be paused (WARN_ON otherwise).  The pause flag
 * is cleared under the txq lock so it is consistent with the scheduling
 * state manipulated by ath_tx_queue_tid().
 */
static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	/* No pending frames: nothing to schedule. */
	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}
142
/*
 * Drop every frame still queued on a TID.
 *
 * Retried frames already occupy a slot in the block-ack window, so they
 * are removed from the BAW and completed with a failed (zeroed) status;
 * frames never transmitted are handed back to the normal (non-aggregate)
 * send path instead.  Runs entirely under the txq lock.
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	INIT_LIST_HEAD(&bf_head);

	/* Zeroed status: these frames report as not acked. */
	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf)) {
			/* Was in the BAW: release its slot, then fail it. */
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			/* Never transmitted: send as a normal HT frame. */
			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
		}
	}

	spin_unlock_bh(&txq->axq_lock);
}
169
/*
 * Release the block-ack window slot held by sequence number @seqno,
 * then slide the window start forward past any leading slots that have
 * also completed.  tx_buf is a bitmap ring of ATH_TID_MAX_BUFS slots;
 * baw_head tracks the slot for seq_start.
 */
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	/* Offset of seqno within the window, then its ring slot. */
	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	/* Advance the window over every contiguous completed slot. */
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}
185
/*
 * Claim a block-ack window slot for @bf's sequence number.
 *
 * A retried frame already owns its slot, so it is skipped.  If the new
 * slot lies beyond the current tail, the tail is moved one slot past it
 * so the head..tail range keeps covering every outstanding frame.
 */
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	/* Retries already have a slot in the window. */
	if (bf_isretried(bf))
		return;

	index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	/* Extend the tail if this frame lands past the current window end. */
	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
204
205/*
206 * TODO: For frame(s) that are in the retry state, we will reuse the
207 * sequence number(s) without setting the retry bit. The
208 * alternative is to give up on these and BAR the receiver's window
209 * forward.
210 */
/*
 * Fail and free every frame queued on a TID, releasing BAW slots for
 * frames that were retried, then reset the TID's sequence/window state.
 *
 * Called with txq->axq_lock held (non-bh variant): the lock is dropped
 * around each ath_tx_complete_buf() call because completion may call
 * back into mac80211, and is re-taken before touching tid->buf_q again.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	/* Zeroed status: frames complete as failed/unacked. */
	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		/* A retried frame holds a BAW slot that must be freed. */
		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		/* Drop the lock across completion (may re-enter mac80211). */
		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	/* Reset window bookkeeping for a fresh session. */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}
240
Sujithfec247c2009-07-27 12:08:16 +0530241static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
242 struct ath_buf *bf)
Sujithe8324352009-01-16 21:38:42 +0530243{
244 struct sk_buff *skb;
245 struct ieee80211_hdr *hdr;
246
247 bf->bf_state.bf_type |= BUF_RETRY;
248 bf->bf_retries++;
Sujithfec247c2009-07-27 12:08:16 +0530249 TX_STAT_INC(txq->axq_qnum, a_retries);
Sujithe8324352009-01-16 21:38:42 +0530250
251 skb = bf->bf_mpdu;
252 hdr = (struct ieee80211_hdr *)skb->data;
253 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
254}
255
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200256static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
257{
258 struct ath_buf *bf = NULL;
259
260 spin_lock_bh(&sc->tx.txbuflock);
261
262 if (unlikely(list_empty(&sc->tx.txbuf))) {
263 spin_unlock_bh(&sc->tx.txbuflock);
264 return NULL;
265 }
266
267 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
268 list_del(&bf->list);
269
270 spin_unlock_bh(&sc->tx.txbuflock);
271
272 return bf;
273}
274
/* Return an ath_buf to the global free pool, under the pool lock. */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}
281
/*
 * Duplicate a tx buffer for software retry of a stale descriptor.
 *
 * The clone shares the original's skb and DMA address but gets its own
 * descriptor (copied) and state.  Returns NULL (with a WARN) if the
 * buffer pool is exhausted — the caller must then fail the frame.
 */
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	/* Shallow copy: the skb and its mapping stay shared with @bf. */
	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}
300
/*
 * Process the tx status of a completed (or failed) A-MPDU.
 *
 * Walks the subframe chain starting at @bf: subframes acked by the
 * block-ack bitmap are completed, un-acked ones are either scheduled
 * for software retry (re-queued on the TID, preserving order) or
 * failed outright once ATH_MAX_SW_RETRIES is exceeded or cleanup is in
 * progress.  Rate-control feedback is reported exactly once per
 * aggregate.  If the station has vanished, every subframe is completed
 * as a failed excessive-retry frame.
 *
 * @bf_q: txq's descriptor list the subframes currently sit on.
 * @txok: hardware-reported overall tx success for the aggregate.
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	int nframes;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	/*
	 * Snapshot the first frame's rate series and subframe count before
	 * per-subframe processing clobbers them; they are restored for the
	 * single rate-control report below.
	 */
	memcpy(rates, tx_info->control.rates, sizeof(rates));
	nframes = bf->bf_nframes;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		/* Station is gone: fail every subframe as excessive retry. */
		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			/* Valid block-ack: record its start seq and bitmap. */
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					/* Retry budget exhausted: give up
					 * and request a BAR. */
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			/* Report rate-control feedback only once, on the
			 * first completed/failed subframe, with the saved
			 * aggregate-wide rates and frame count. */
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				bf->bf_nframes = nframes;
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		/* Session teardown finishes once the BAW is fully drained. */
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}
532
/*
 * Compute the maximum aggregate size (in bytes) usable for @bf on
 * TID @tid, based on the frame's rate series.
 *
 * Returns 0 — i.e. "do not aggregate" — when any rate in the series is
 * a legacy (non-MCS) rate or the frame is a rate-control probe.
 * Otherwise the limit is the smallest 4 ms frame length over the rate
 * series, reduced further under BT coexistence and capped by the
 * peer's advertised max A-MPDU size.
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			/* Pick the table row: width first, then bump for SGI
			 * (relies on the SGI enum following its base mode). */
			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	/* Under BT coexistence, shrink aggregates to 3/8 of the limit. */
	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates upto 16 bit lengths (65535).
	 * The IE, however can hold upto 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
600
601/*
Sujithd43f30152009-01-16 21:38:53 +0530602 * Returns the number of delimiters to be added to
Sujithe8324352009-01-16 21:38:42 +0530603 * meet the minimum required mpdudensity.
Sujithe8324352009-01-16 21:38:42 +0530604 */
/*
 * Compute the number of MPDU delimiters (4 bytes each) needed in front
 * of a subframe of length @frmlen so that the aggregate satisfies both
 * a frame-length-based minimum and the peer's advertised MPDU density,
 * plus extra padding when hardware encryption is in use.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microeconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	/* Symbols the density corresponds to at this guard interval. */
	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	/* Bytes transmitted during that many symbols at the first rate. */
	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Pad short subframes up to the density minimum with delimiters. */
	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
664
/*
 * Build one A-MPDU from the head of @tid's buffer queue.
 *
 * Frames are moved from tid->buf_q onto @bf_q and chained together via
 * bf_next and their hardware descriptor links, until the block-ack
 * window closes, the byte/subframe limit is hit, or a probe/legacy-rate
 * frame is encountered.  Each added frame claims a BAW slot.  The first
 * frame records the total aggregate length and subframe count.
 *
 * Returns the reason aggregation stopped (done / limited / BAW closed).
 * Caller must ensure tid->buf_q is non-empty.
 */
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		/* Rate lookup once, based on the first frame's rate series. */
		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* Stop before a probe or legacy-rate frame (but a lone first
		 * frame of that kind is still taken). */
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	/* Record aggregate totals on the leading frame. */
	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}
747
/*
 * Repeatedly form aggregates from @tid's queue and hand them to the
 * hardware queue, until the queue is empty, the txq is deep enough
 * (ATH_AGGR_MIN_QDEPTH), or the block-ack window closes.
 *
 * A single-frame "aggregate" is demoted to a normal frame before
 * transmission; otherwise the first and last descriptors are marked so
 * the hardware treats the chain as one A-MPDU.
 */
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
796
Felix Fietkau231c3a12010-09-20 19:35:28 +0200797int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
798 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +0530799{
800 struct ath_atx_tid *txtid;
801 struct ath_node *an;
802
803 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +0530804 txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau231c3a12010-09-20 19:35:28 +0200805
806 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
807 return -EAGAIN;
808
Sujithf83da962009-07-23 15:32:37 +0530809 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200810 txtid->paused = true;
Sujithf83da962009-07-23 15:32:37 +0530811 *ssn = txtid->seq_start;
Felix Fietkau231c3a12010-09-20 19:35:28 +0200812
813 return 0;
Sujithe8324352009-01-16 21:38:42 +0530814}
815
/*
 * Tear down the tx aggregation session for a station/TID.
 * If subframes are still in flight the TID is flagged for deferred
 * cleanup; otherwise the ADDBA state is dropped immediately. In both
 * cases the TID is paused and its pending frames flushed.
 */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	/* cleanup already scheduled; nothing more to do */
	if (txtid->state & AGGR_CLEANUP)
		return;

	/* session never completed; just cancel the in-progress ADDBA */
	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}
847
848void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
849{
850 struct ath_atx_tid *txtid;
851 struct ath_node *an;
852
853 an = (struct ath_node *)sta->drv_priv;
854
855 if (sc->sc_flags & SC_OP_TXAGGR) {
856 txtid = ATH_AN_2_TID(an, tid);
857 txtid->baw_size =
858 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
859 txtid->state |= AGGR_ADDBA_COMPLETE;
860 txtid->state &= ~AGGR_ADDBA_PROGRESS;
861 ath_tx_resume_tid(sc, txtid);
862 }
863}
864
Sujithe8324352009-01-16 21:38:42 +0530865/********************/
866/* Queue Management */
867/********************/
868
/*
 * Remove every access category (and every TID queued under it) from the
 * txq's scheduling list and drop their software-queued frames.
 * NOTE(review): callers appear to invoke this with txq->axq_lock held
 * (see ath_draintxq) — confirm before adding new call sites.
 */
static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		/* unschedule the access category ... */
		list_del(&ac->list);
		ac->sched = false;
		/* ... and every TID hanging off it, dropping their frames */
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}
885
/*
 * Allocate and initialize a hardware tx queue of the given type/subtype.
 * Returns a pointer to the driver's ath_txq wrapper, or NULL when the
 * hardware has no free queue or the returned qnum is out of range.
 * Software state is initialized only the first time a qnum is handed out.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	/* map WME access categories to hardware queue subtypes */
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		/* hw gave us a queue the driver cannot track; release it */
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		/* first time this qnum is used: set up software state */
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		/* EDMA tx fifo ring state (used only on EDMA-capable parts) */
		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}
965
/*
 * Push updated queue parameters (AIFS, CW min/max, burst/ready time) to
 * the hardware queue @qnum, then reset the queue so they take effect.
 * The beacon queue is special-cased: its parameters are only saved and
 * applied later by ath_beaconq_config.
 * Returns 0 on success, -EIO if the hardware rejects the parameters.
 */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	/* read current props, then overlay only the caller-supplied fields */
	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		/* reset so the new parameters take effect */
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}
1002
1003int ath_cabq_update(struct ath_softc *sc)
1004{
1005 struct ath9k_tx_queue_info qi;
1006 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301007
1008 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1009 /*
1010 * Ensure the readytime % is within the bounds.
1011 */
Sujith17d79042009-02-09 13:27:03 +05301012 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1013 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1014 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1015 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301016
Johannes Berg57c4d7b2009-04-23 16:10:04 +02001017 qi.tqi_readyTime = (sc->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301018 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301019 ath_txq_update(sc, qnum, &qi);
1020
1021 return 0;
1022}
1023
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 *
 * Every queued frame is completed with an error status; when @retry_tx
 * is false the frames are additionally marked aborted so they are not
 * retried. Handles both the legacy descriptor list and the EDMA tx fifo.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	/* zeroed status: frames are completed as failed/aborted */
	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				/* fifo empty: reset ring indices and stop */
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			/* stale holding buffer: just return it to the pool */
			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		/* completion callbacks must run without the queue lock */
		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* EDMA: also complete frames that never made it into the fifo */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}
1127
/*
 * Stop DMA on the beacon queue and every data queue, then drain their
 * queued frames. If any queue still has frames pending after the stop
 * request, the chip is reset to force DMA to a halt before draining.
 */
void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			/* count frames the hardware has not let go of yet */
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		/* DMA did not stop cleanly; a full chip reset forces it */
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}
1168
Sujithe8324352009-01-16 21:38:42 +05301169void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1170{
1171 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1172 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1173}
1174
/*
 * Round-robin tx scheduler: pick the first access category queued on the
 * txq, service one of its TIDs (skipping paused ones), and requeue the
 * TID/AC at the tail if they still have pending frames.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	/* nothing to schedule */
	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		/* paused TIDs are dropped from the rotation for now */
		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		/* only one TID is serviced per invocation */
		break;
	} while (!list_empty(&ac->tid_q));

	/* requeue the AC at the tail if it still has schedulable TIDs */
	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}
1217
Sujithe8324352009-01-16 21:38:42 +05301218/***********/
1219/* TX, DMA */
1220/***********/
1221
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 *
 * On EDMA hardware the chain goes into the tx fifo ring (or the pending
 * list when the fifo is full); on legacy hardware it is appended to the
 * descriptor list and linked to the previous tail.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		/* fifo full: park the chain until a slot frees up */
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			/* queue was idle: point hardware at the new chain */
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			/* link new chain onto the previous tail descriptor */
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		/* remember the new tail's link field for the next chain */
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}
1285
/*
 * Queue a frame belonging to an aggregation-enabled TID. The frame is
 * either handed straight to hardware as a single-frame A-MPDU, or parked
 * in the TID's software queue for later aggregation by the scheduler.
 */
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}
1324
Sujithc37452b2009-03-09 09:31:57 +05301325static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
1326 struct ath_atx_tid *tid,
1327 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001328{
Sujithe8324352009-01-16 21:38:42 +05301329 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001330
Sujithe8324352009-01-16 21:38:42 +05301331 bf = list_first_entry(bf_head, struct ath_buf, list);
1332 bf->bf_state.bf_type &= ~BUF_AMPDU;
1333
1334 /* update starting sequence number for subsequent ADDBA request */
1335 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1336
1337 bf->bf_nframes = 1;
Sujithd43f30152009-01-16 21:38:53 +05301338 bf->bf_lastbf = bf;
Sujithe8324352009-01-16 21:38:42 +05301339 ath_buf_set_rate(sc, bf);
1340 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301341 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001342}
1343
Sujithc37452b2009-03-09 09:31:57 +05301344static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1345 struct list_head *bf_head)
1346{
1347 struct ath_buf *bf;
1348
1349 bf = list_first_entry(bf_head, struct ath_buf, list);
1350
1351 bf->bf_lastbf = bf;
1352 bf->bf_nframes = 1;
1353 ath_buf_set_rate(sc, bf);
1354 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301355 TX_STAT_INC(txq->axq_qnum, queued);
Sujithc37452b2009-03-09 09:31:57 +05301356}
1357
Sujith528f0c62008-10-29 10:14:26 +05301358static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001359{
Sujith528f0c62008-10-29 10:14:26 +05301360 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001361 enum ath9k_pkt_type htype;
1362 __le16 fc;
1363
Sujith528f0c62008-10-29 10:14:26 +05301364 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001365 fc = hdr->frame_control;
1366
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001367 if (ieee80211_is_beacon(fc))
1368 htype = ATH9K_PKT_TYPE_BEACON;
1369 else if (ieee80211_is_probe_resp(fc))
1370 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1371 else if (ieee80211_is_atim(fc))
1372 htype = ATH9K_PKT_TYPE_ATIM;
1373 else if (ieee80211_is_pspoll(fc))
1374 htype = ATH9K_PKT_TYPE_PSPOLL;
1375 else
1376 htype = ATH9K_PKT_TYPE_NORMAL;
1377
1378 return htype;
1379}
1380
/*
 * Record the TID of an outgoing frame in its ath_buf and stamp the frame
 * with the driver-maintained aggregation sequence number, overriding the
 * seqno assigned by the upper layer. No-op for frames without a station.
 * NOTE(review): for non-QoS-data frames bf_tidno is left at whatever
 * value the buffer already holds before the TID lookup — confirm callers
 * initialize it.
 */
static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	/* no station context: nothing to assign */
	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* TID comes from the low nibble of the QoS control field */
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
1413
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001414static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
Sujith528f0c62008-10-29 10:14:26 +05301415{
1416 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1417 int flags = 0;
1418
1419 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1420 flags |= ATH9K_TXDESC_INTREQ;
1421
1422 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1423 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301424
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001425 if (use_ldpc)
1426 flags |= ATH9K_TXDESC_LDPC;
1427
Sujith528f0c62008-10-29 10:14:26 +05301428 return flags;
1429}
1430
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001431/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001432 * rix - rate index
1433 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1434 * width - 0 for 20 MHz, 1 for 40 MHz
1435 * half_gi - to use 4us v/s 3.6 us for symbol time
1436 */
Sujith102e0572008-10-29 10:15:16 +05301437static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1438 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001439{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001440 u32 nbits, nsymbits, duration, nsymbols;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001441 int streams, pktlen;
1442
Sujithcd3d39a2008-08-11 14:03:34 +05301443 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +05301444
1445 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001446 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001447 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001448 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001449 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1450
1451 if (!half_gi)
1452 duration = SYMBOL_TIME(nsymbols);
1453 else
1454 duration = SYMBOL_TIME_HALFGI(nsymbols);
1455
Sujithe63835b2008-11-18 09:07:53 +05301456 /* addup duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001457 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301458
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001459 return duration;
1460}
1461
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001462static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1463{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001464 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001465 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301466 struct sk_buff *skb;
1467 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301468 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001469 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301470 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301471 int i, flags = 0;
1472 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301473 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301474
1475 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301476
Sujitha22be222009-03-30 15:28:36 +05301477 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301478 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301479 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301480 hdr = (struct ieee80211_hdr *)skb->data;
1481 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301482
Sujithc89424d2009-01-30 14:29:28 +05301483 /*
1484 * We check if Short Preamble is needed for the CTS rate by
1485 * checking the BSS's global flag.
1486 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1487 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001488 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1489 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301490 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001491 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001492
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001493 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001494 bool is_40, is_sgi, is_sp;
1495 int phy;
1496
Sujithe63835b2008-11-18 09:07:53 +05301497 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001498 continue;
1499
Sujitha8efee42008-11-18 09:07:30 +05301500 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301501 series[i].Tries = rates[i].count;
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001502 series[i].ChSel = common->tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001503
Felix Fietkau27032052010-01-17 21:08:50 +01001504 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1505 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301506 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001507 flags |= ATH9K_TXDESC_RTSENA;
1508 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1509 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1510 flags |= ATH9K_TXDESC_CTSENA;
1511 }
1512
Sujithc89424d2009-01-30 14:29:28 +05301513 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1514 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1515 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1516 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001517
Felix Fietkau545750d2009-11-23 22:21:01 +01001518 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1519 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1520 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1521
1522 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1523 /* MCS rates */
1524 series[i].Rate = rix | 0x80;
1525 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1526 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001527 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1528 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001529 continue;
1530 }
1531
1532 /* legcay rates */
1533 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1534 !(rate->flags & IEEE80211_RATE_ERP_G))
1535 phy = WLAN_RC_PHY_CCK;
1536 else
1537 phy = WLAN_RC_PHY_OFDM;
1538
1539 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1540 series[i].Rate = rate->hw_value;
1541 if (rate->hw_value_short) {
1542 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1543 series[i].Rate |= rate->hw_value_short;
1544 } else {
1545 is_sp = false;
1546 }
1547
1548 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1549 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001550 }
1551
Felix Fietkau27032052010-01-17 21:08:50 +01001552 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1553 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1554 flags &= ~ATH9K_TXDESC_RTSENA;
1555
1556 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1557 if (flags & ATH9K_TXDESC_RTSENA)
1558 flags &= ~ATH9K_TXDESC_CTSENA;
1559
Sujithe63835b2008-11-18 09:07:53 +05301560 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301561 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1562 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301563 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301564 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301565
Sujith17d79042009-02-09 13:27:03 +05301566 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301567 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001568}
1569
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001570static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
Sujithe8324352009-01-16 21:38:42 +05301571 struct sk_buff *skb,
1572 struct ath_tx_control *txctl)
1573{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001574 struct ath_wiphy *aphy = hw->priv;
1575 struct ath_softc *sc = aphy->sc;
Sujithe8324352009-01-16 21:38:42 +05301576 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1577 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301578 int hdrlen;
1579 __le16 fc;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001580 int padpos, padsize;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001581 bool use_ldpc = false;
Sujithe8324352009-01-16 21:38:42 +05301582
Felix Fietkau827e69b2009-11-15 23:09:25 +01001583 tx_info->pad[0] = 0;
1584 switch (txctl->frame_type) {
Pavel Roskinc81494d2010-03-31 18:05:25 -04001585 case ATH9K_IFT_NOT_INTERNAL:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001586 break;
Pavel Roskinc81494d2010-03-31 18:05:25 -04001587 case ATH9K_IFT_PAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001588 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1589 /* fall through */
Pavel Roskinc81494d2010-03-31 18:05:25 -04001590 case ATH9K_IFT_UNPAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001591 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1592 break;
1593 }
Sujithe8324352009-01-16 21:38:42 +05301594 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1595 fc = hdr->frame_control;
1596
1597 ATH_TXBUF_RESET(bf);
1598
Felix Fietkau827e69b2009-11-15 23:09:25 +01001599 bf->aphy = aphy;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001600 bf->bf_frmlen = skb->len + FCS_LEN;
1601 /* Remove the padding size from bf_frmlen, if any */
1602 padpos = ath9k_cmn_padpos(hdr->frame_control);
1603 padsize = padpos & 3;
1604 if (padsize && skb->len>padpos+padsize) {
1605 bf->bf_frmlen -= padsize;
1606 }
Sujithe8324352009-01-16 21:38:42 +05301607
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001608 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
Sujithc656bbb2009-01-16 21:38:56 +05301609 bf->bf_state.bf_type |= BUF_HT;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001610 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1611 use_ldpc = true;
1612 }
Sujithe8324352009-01-16 21:38:42 +05301613
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001614 bf->bf_state.bfs_paprd = txctl->paprd;
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001615 if (txctl->paprd)
1616 bf->bf_state.bfs_paprd_timestamp = jiffies;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001617 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
Sujithe8324352009-01-16 21:38:42 +05301618
Luis R. Rodriguezc17512d2010-08-05 17:56:54 -04001619 bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujithe8324352009-01-16 21:38:42 +05301620 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1621 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1622 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1623 } else {
1624 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1625 }
1626
Sujith17b182e2009-12-14 14:56:56 +05301627 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1628 (sc->sc_flags & SC_OP_TXAGGR))
Sujithe8324352009-01-16 21:38:42 +05301629 assign_aggr_tid_seqno(skb, bf);
1630
1631 bf->bf_mpdu = skb;
1632
Ben Greearc1739eb32010-10-14 12:45:29 -07001633 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1634 skb->len, DMA_TO_DEVICE);
1635 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301636 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001637 bf->bf_buf_addr = 0;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001638 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1639 "dma_mapping_error() on TX\n");
Sujithe8324352009-01-16 21:38:42 +05301640 return -ENOMEM;
1641 }
1642
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001643 bf->bf_tx_aborted = false;
1644
Sujithe8324352009-01-16 21:38:42 +05301645 return 0;
1646}
1647
/* FIXME: tx power */
/*
 * Program the hardware descriptor for a fully prepared ath_buf and hand
 * it to the appropriate TX path (normal, HT-normal, or A-MPDU), all
 * under the destination queue's axq_lock.
 */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hw *ah = sc->sc_ah;
	int frm_type;
	__le16 fc;

	frm_type = get_hw_packet_type(skb);
	fc = hdr->frame_control;

	/* Single-buffer list: the whole frame fits in one descriptor here */
	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	ds = bf->bf_desc;
	/* Terminate the descriptor chain (link = 0) */
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len, /* segment length */
			    true,     /* first segment */
			    true,     /* last segment */
			    ds,       /* first descriptor */
			    bf->bf_buf_addr,
			    txctl->txq->axq_qnum);

	/* Attach PAPRD calibration state to the descriptor if present */
	if (bf->bf_state.bfs_paprd)
		ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		/* Non-QoS data cannot be aggregated; send it plain */
		if (!ieee80211_is_data_qos(fc)) {
			ath_tx_send_normal(sc, txctl->txq, &bf_head);
			goto tx_done;
		}

		/* TID must be mapped to the queue we're about to use */
		WARN_ON(tid->ac->txq != txctl->txq);
		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_ht_normal(sc, txctl->txq,
					      tid, &bf_head);
		}
	} else {
		ath_tx_send_normal(sc, txctl->txq, &bf_head);
	}

tx_done:
	spin_unlock_bh(&txctl->txq->axq_lock);
}
1720
/* Upon failure caller should free skb */
/*
 * Driver entry point for transmitting one frame: grab a TX buffer,
 * set it up (ath_tx_setup_buffer), apply mac80211 queue flow control,
 * and kick off DMA. Returns 0 on success, negative on failure; on
 * failure the skb is NOT consumed.
 */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int q, r;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	q = skb_get_queue_mapping(skb);
	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
	if (unlikely(r)) {
		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* upon ath_tx_processq() this TX queue will be resumed, we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (txq == sc->tx.txq_map[q] && !txq->stopped &&
		    txq->axq_depth > 1) {
			ath_mac80211_stop_queue(sc, q);
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		/* Hand the unused buffer back to the free pool */
		ath_tx_return_buffer(sc, bf);

		return r;
	}

	/* Throttle mac80211 once this hw queue backs up past the limit */
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
1772
/*
 * Queue a frame on the CAB (content-after-beacon) queue, used for
 * buffered multicast/broadcast traffic delivered right after a beacon.
 * Assigns a sequence number if requested and inserts 4-byte header
 * padding when the MAC header length requires it. Consumes the skb on
 * every path (either transmitted or freed here).
 */
void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int padpos, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos) {
		if (skb_headroom(skb) < padsize) {
			/* Cannot make room for the pad; drop the frame */
			ath_print(common, ATH_DBG_XMIT,
				  "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		/* Grow head and shift the header up, opening a pad gap */
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	txctl.txq = sc->beacon.cabq;

	ath_print(common, ATH_DBG_XMIT,
		  "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}
1825
Sujithe8324352009-01-16 21:38:42 +05301826/*****************/
1827/* TX Completion */
1828/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001829
/*
 * Final per-skb completion: translate driver TX flags into mac80211
 * status flags, strip any header padding added on transmit, update
 * powersave state, adjust the queue's pending-frame count, and hand
 * the skb back to mac80211 (or to the internal status path for
 * driver-generated frames).
 */
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags,
			    struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	/* Report status on the wiphy the frame was submitted from */
	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	/* If we were awake only to get this TX status, go back to sleep */
	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having "
			  "received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
		ath9k_tx_status(hw, skb);
	else {
		q = skb_get_queue_mapping(skb);
		if (txq == sc->tx.txq_map[q]) {
			spin_lock_bh(&txq->axq_lock);
			/* Underflow would indicate unbalanced accounting */
			if (WARN_ON(--txq->pending_frames < 0))
				txq->pending_frames = 0;
			spin_unlock_bh(&txq->axq_lock);
		}

		ieee80211_tx_status(hw, skb);
	}
}
1889
1890static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001891 struct ath_txq *txq, struct list_head *bf_q,
1892 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301893{
1894 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301895 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301896 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301897
Sujithe8324352009-01-16 21:38:42 +05301898 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301899 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301900
1901 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301902 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301903
1904 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301905 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301906 }
1907
Ben Greearc1739eb32010-10-14 12:45:29 -07001908 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001909 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001910
1911 if (bf->bf_state.bfs_paprd) {
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001912 if (time_after(jiffies,
1913 bf->bf_state.bfs_paprd_timestamp +
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001914 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001915 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001916 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001917 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001918 } else {
Felix Fietkau066dae92010-11-07 14:59:39 +01001919 ath_debug_stat_tx(sc, bf, ts);
1920 ath_tx_complete(sc, skb, bf->aphy, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001921 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001922 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
1923 * accidentally reference it later.
1924 */
1925 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301926
1927 /*
1928 * Return the list of ath_buf of this mpdu to free queue
1929 */
1930 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1931 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1932 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1933}
1934
1935static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001936 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301937{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001938 u16 seq_st = 0;
1939 u32 ba[WME_BA_BMP_SIZE >> 5];
Sujithe8324352009-01-16 21:38:42 +05301940 int ba_index;
1941 int nbad = 0;
1942 int isaggr = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001943
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001944 if (bf->bf_lastbf->bf_tx_aborted)
Sujithe8324352009-01-16 21:38:42 +05301945 return 0;
Sujith528f0c62008-10-29 10:14:26 +05301946
Sujithcd3d39a2008-08-11 14:03:34 +05301947 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001948 if (isaggr) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001949 seq_st = ts->ts_seqnum;
1950 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001951 }
1952
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001953 while (bf) {
Sujithe8324352009-01-16 21:38:42 +05301954 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1955 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1956 nbad++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001957
Sujithe8324352009-01-16 21:38:42 +05301958 bf = bf->bf_next;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001959 }
1960
Sujithe8324352009-01-16 21:38:42 +05301961 return nbad;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001962}
1963
/*
 * Translate hardware TX status into the mac80211 rate-control feedback
 * stored in the skb's tx_info: ACK RSSI, filtered/A-MPDU flags,
 * aggregate length accounting, underrun handling, and zeroing of the
 * rate table entries that were never tried.
 */
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	struct ath_softc *sc = bf->aphy->sc;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	/* Index of the rate series the frame actually went out on */
	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > bf->bf_nframes);

		/* Report aggregate size and how many subframes were ACKed */
		tx_info->status.ampdu_len = bf->bf_nframes;
		tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (ieee80211_is_data(hdr->frame_control) &&
		    (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
				     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	/* Invalidate the rate entries after the one actually used */
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	/* ts_longretry counts retries; +1 converts to attempt count */
	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
2021
Felix Fietkau066dae92010-11-07 14:59:39 +01002022static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
Sujith059d8062009-01-16 21:38:49 +05302023{
Felix Fietkau066dae92010-11-07 14:59:39 +01002024 struct ath_txq *txq;
Sujith059d8062009-01-16 21:38:49 +05302025
Felix Fietkau066dae92010-11-07 14:59:39 +01002026 txq = sc->tx.txq_map[qnum];
Sujith059d8062009-01-16 21:38:49 +05302027 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01002028 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
Vasanthakumar Thiagarajan68e8f2f2010-07-22 02:24:11 -07002029 if (ath_mac80211_start_queue(sc, qnum))
2030 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05302031 }
2032 spin_unlock_bh(&txq->axq_lock);
2033}
2034
/*
 * Reap completed frames from one hardware TX queue: walk axq_q, ask the
 * hardware for per-descriptor status, detach finished transmit units
 * (keeping the last DONE descriptor as a "holding" descriptor for the
 * hardware — see the race comment below), and run completion/rate
 * control for each. Drops and retakes axq_lock around the completion
 * calls.
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;
	int qnum;

	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		  txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/* Only the holding descriptor remains */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Process the frame after the holder */
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			/* Hardware hasn't finished this frame yet */
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);
		spin_unlock_bh(&txq->axq_lock);

		/* The previous holding descriptor is now free to recycle */
		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
		}

		/* Read the mapping before completion may consume the skb */
		qnum = skb_get_queue_mapping(bf->bf_mpdu);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		if (txq == sc->tx.txq_map[qnum])
			ath_wake_mac80211_queue(sc, qnum);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
2137
Sujith305fe472009-07-23 15:32:29 +05302138static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002139{
2140 struct ath_softc *sc = container_of(work, struct ath_softc,
2141 tx_complete_work.work);
2142 struct ath_txq *txq;
2143 int i;
2144 bool needreset = false;
2145
2146 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2147 if (ATH_TXQ_SETUP(sc, i)) {
2148 txq = &sc->tx.txq[i];
2149 spin_lock_bh(&txq->axq_lock);
2150 if (txq->axq_depth) {
2151 if (txq->axq_tx_inprogress) {
2152 needreset = true;
2153 spin_unlock_bh(&txq->axq_lock);
2154 break;
2155 } else {
2156 txq->axq_tx_inprogress = true;
2157 }
2158 }
2159 spin_unlock_bh(&txq->axq_lock);
2160 }
2161
2162 if (needreset) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002163 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2164 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302165 ath9k_ps_wakeup(sc);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002166 ath_reset(sc, true);
Sujith332c5562009-10-09 09:51:28 +05302167 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002168 }
2169
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002170 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002171 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2172}
2173
2174
Sujithe8324352009-01-16 21:38:42 +05302175
2176void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177{
Sujithe8324352009-01-16 21:38:42 +05302178 int i;
2179 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002180
Sujithe8324352009-01-16 21:38:42 +05302181 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002182
2183 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302184 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2185 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002186 }
2187}
2188
/*
 * TX completion handler for EDMA (AR9003-family) chips: drain the
 * hardware TX status ring, complete the corresponding buffers from the
 * per-queue software FIFO, and refill the FIFO from any pending list.
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;
	int qnum;

	for (;;) {
		/* Pop the next entry from the TX status ring. */
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			/* Ring empty: nothing more to process. */
			break;
		if (status == -EIO) {
			ath_print(common, ATH_DBG_XMIT,
				  "Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			/* Status with no matching buffer queued; bail out. */
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		/* The status refers to the frame set headed by the first
		 * buffer in the tail FIFO slot; detach it up to bf_lastbf. */
		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		/* Completion seen: clear the hang-watchdog flag. */
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			/* Single frame: take retry status from hardware. */
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
		}

		qnum = skb_get_queue_mapping(bf->bf_mpdu);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		/* Wake the mac80211 queue mapped to this hardware queue. */
		if (txq == sc->tx.txq_map[qnum])
			ath_wake_mac80211_queue(sc, qnum);

		/* Refill the hardware FIFO from the pending list, or let
		 * the aggregation scheduler queue more frames. */
		spin_lock_bh(&txq->axq_lock);
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
2267
Sujithe8324352009-01-16 21:38:42 +05302268/*****************/
2269/* Init, Cleanup */
2270/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002271
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002272static int ath_txstatus_setup(struct ath_softc *sc, int size)
2273{
2274 struct ath_descdma *dd = &sc->txsdma;
2275 u8 txs_len = sc->sc_ah->caps.txs_len;
2276
2277 dd->dd_desc_len = size * txs_len;
2278 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2279 &dd->dd_desc_paddr, GFP_KERNEL);
2280 if (!dd->dd_desc)
2281 return -ENOMEM;
2282
2283 return 0;
2284}
2285
2286static int ath_tx_edma_init(struct ath_softc *sc)
2287{
2288 int err;
2289
2290 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2291 if (!err)
2292 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2293 sc->txsdma.dd_desc_paddr,
2294 ATH_TXSTATUS_RING_SIZE);
2295
2296 return err;
2297}
2298
2299static void ath_tx_edma_cleanup(struct ath_softc *sc)
2300{
2301 struct ath_descdma *dd = &sc->txsdma;
2302
2303 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2304 dd->dd_desc_paddr);
2305}
2306
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002307int ath_tx_init(struct ath_softc *sc, int nbufs)
2308{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002309 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002310 int error = 0;
2311
Sujith797fe5cb2009-03-30 15:28:45 +05302312 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002313
Sujith797fe5cb2009-03-30 15:28:45 +05302314 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002315 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302316 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002317 ath_print(common, ATH_DBG_FATAL,
2318 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302319 goto err;
2320 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002321
Sujith797fe5cb2009-03-30 15:28:45 +05302322 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002323 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302324 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002325 ath_print(common, ATH_DBG_FATAL,
2326 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302327 goto err;
2328 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002329
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002330 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2331
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002332 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2333 error = ath_tx_edma_init(sc);
2334 if (error)
2335 goto err;
2336 }
2337
Sujith797fe5cb2009-03-30 15:28:45 +05302338err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002339 if (error != 0)
2340 ath_tx_cleanup(sc);
2341
2342 return error;
2343}
2344
Sujith797fe5cb2009-03-30 15:28:45 +05302345void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002346{
Sujithb77f4832008-12-07 21:44:03 +05302347 if (sc->beacon.bdma.dd_desc_len != 0)
2348 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002349
Sujithb77f4832008-12-07 21:44:03 +05302350 if (sc->tx.txdma.dd_desc_len != 0)
2351 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002352
2353 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2354 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002355}
2356
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002357void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2358{
Sujithc5170162008-10-29 10:13:59 +05302359 struct ath_atx_tid *tid;
2360 struct ath_atx_ac *ac;
2361 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002362
Sujith8ee5afb2008-12-07 21:43:36 +05302363 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302364 tidno < WME_NUM_TID;
2365 tidno++, tid++) {
2366 tid->an = an;
2367 tid->tidno = tidno;
2368 tid->seq_start = tid->seq_next = 0;
2369 tid->baw_size = WME_MAX_BA;
2370 tid->baw_head = tid->baw_tail = 0;
2371 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302372 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302373 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302374 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302375 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302376 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302377 tid->state &= ~AGGR_ADDBA_COMPLETE;
2378 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302379 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002380
Sujith8ee5afb2008-12-07 21:43:36 +05302381 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302382 acno < WME_NUM_AC; acno++, ac++) {
2383 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002384 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302385 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002386 }
2387}
2388
Sujithb5aa9bf2008-10-29 10:13:31 +05302389void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002390{
Felix Fietkau2b409942010-07-07 19:42:08 +02002391 struct ath_atx_ac *ac;
2392 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002393 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002394 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302395
Felix Fietkau2b409942010-07-07 19:42:08 +02002396 for (tidno = 0, tid = &an->tid[tidno];
2397 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002398
Felix Fietkau2b409942010-07-07 19:42:08 +02002399 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002400 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002401
Felix Fietkau2b409942010-07-07 19:42:08 +02002402 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002403
Felix Fietkau2b409942010-07-07 19:42:08 +02002404 if (tid->sched) {
2405 list_del(&tid->list);
2406 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002407 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002408
2409 if (ac->sched) {
2410 list_del(&ac->list);
2411 tid->ac->sched = false;
2412 }
2413
2414 ath_tid_drain(sc, txq, tid);
2415 tid->state &= ~AGGR_ADDBA_COMPLETE;
2416 tid->state &= ~AGGR_CLEANUP;
2417
2418 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002419 }
2420}