/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

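/*
 * Timing constants for the HT duration math below: L_STF/L_LTF/L_SIG and
 * HT_SIG/HT_STF/HT_LTF() are the legacy and HT preamble/signal field
 * durations in microseconds, and SYMBOL_TIME()/SYMBOL_TIME_HALFGI()
 * convert a symbol count into airtime (4 us per symbol with the long GI,
 * 3.6 us with the short GI; e.g. SYMBOL_TIME(10) = 40,
 * SYMBOL_TIME_HALFGI(10) = 36).
 */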
#define BITS_PER_BYTE            8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                    8
#define L_LTF                    8
#define L_SIG                    4
#define HT_SIG                   8
#define HT_STF                   4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

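/*
 * Data bits carried per OFDM symbol for one spatial stream, indexed by
 * MCS (modulo 8) and channel width. For example, MCS 7 on a 40 MHz channel
 * carries 540 bits per 4 us symbol, i.e. 135 Mbit/s per stream with the
 * long guard interval.
 */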
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb,
					   bool dequeue);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

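/*
 * Approximate maximum frame length (in bytes) that fits into a 4 ms
 * transmission for each MCS, indexed by [GI/width mode][MCS 0-31] and
 * capped at 65532 to stay below the 16-bit aggregate length limit.
 * ath_lookup_rate() uses this table to bound the A-MPDU size.
 */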
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}

void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}

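/*
 * Drop the queue lock, then hand any frames queued on txq->complete_q
 * over to mac80211, so that ieee80211_tx_status() is never called with
 * axq_lock held.
 */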
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	ath_txq_lock(sc, txq);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	ath_txq_unlock_complete(sc, txq);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			sendbar = true;
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
	}

	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	if (sendbar) {
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}

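/*
 * The block-ack window is tracked in tid->tx_buf, a bitmap of outstanding
 * subframes relative to tid->seq_start. Completing a frame clears its bit,
 * and the window start (seq_start/baw_head) then slides forward past any
 * leading frames that have already completed.
 */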
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
	tid->bar_index = -1;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

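/*
 * Process the tx status of an aggregate: subframes covered by the block-ack
 * bitmap are completed, subframes still under the software retry limit are
 * put back on the tid queue in their original order, and frames that are
 * dropped for good raise a BAR so the receiver can move its window forward.
 */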
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * The AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if ((tid->state & AGGR_CLEANUP) || !retry) {
			/*
			 * cleanup in progress, just fail
			 * the un-acked sub-frames
			 */
			txfail = 1;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
			    bf->bf_next == NULL && bf_last->bf_stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

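/* Returns true if any valid entry in the frame's rate series is a legacy (non-MCS) rate. */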
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregation of this packet altogether.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add a delimiter when using RTS/CTS with aggregation
	 * and a non-enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. the first rate) to
	 * determine the required minimum length for a subframe. Take into
	 * account whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

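/*
 * Assemble one A-MPDU from the head of the tid queue: stop when the
 * block-ack window would be overstepped, when the rate-dependent length
 * limit or the subframe limit is reached, and insert enough delimiters
 * between subframes to satisfy the peer's minimum MPDU density.
 */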
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, &tid->buf_q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		if (!bf_first)
			bf_first = bf;

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
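/*
 * Example: a 1500-byte MPDU at MCS 7 (260 bits/symbol) on 20 MHz with the
 * long GI needs DIV_ROUND_UP(1500 * 8 + 22, 260) = 47 symbols, i.e. 188 us
 * of data plus 36 us of training/signal fields.
 */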
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* addup duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;

	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

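/*
 * Fill the hardware descriptors for a frame or aggregate: the rate series
 * and descriptor flags are set up once from the first buffer, then every
 * buffer in the chain is linked to the next and tagged as the first,
 * middle or last subframe of the aggregate.
 */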
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

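/*
 * Keep forming aggregates for this tid and handing them to the hardware
 * queue until the queue holds enough A-MPDUs or the block-ack window
 * closes.
 */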
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_txq_lock(sc, txq);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;

	ath_tx_flush_tid(sc, txtid);
	ath_txq_unlock_complete(sc, txq);
}

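/*
 * Used when a station goes to sleep: unschedule its TIDs and tell mac80211
 * which of them still have frames buffered in the driver.
 */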
Johannes Berg042ec452011-09-29 16:04:26 +02001228void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1229 struct ath_node *an)
Felix Fietkau55195412011-04-17 23:28:09 +02001230{
1231 struct ath_atx_tid *tid;
1232 struct ath_atx_ac *ac;
1233 struct ath_txq *txq;
Johannes Berg042ec452011-09-29 16:04:26 +02001234 bool buffered;
Felix Fietkau55195412011-04-17 23:28:09 +02001235 int tidno;
1236
1237 for (tidno = 0, tid = &an->tid[tidno];
1238 tidno < WME_NUM_TID; tidno++, tid++) {
1239
1240 if (!tid->sched)
1241 continue;
1242
1243 ac = tid->ac;
1244 txq = ac->txq;
1245
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001246 ath_txq_lock(sc, txq);
Felix Fietkau55195412011-04-17 23:28:09 +02001247
Johannes Berg042ec452011-09-29 16:04:26 +02001248 buffered = !skb_queue_empty(&tid->buf_q);
Felix Fietkau55195412011-04-17 23:28:09 +02001249
1250 tid->sched = false;
1251 list_del(&tid->list);
1252
1253 if (ac->sched) {
1254 ac->sched = false;
1255 list_del(&ac->list);
1256 }
1257
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001258 ath_txq_unlock(sc, txq);
Felix Fietkau55195412011-04-17 23:28:09 +02001259
Johannes Berg042ec452011-09-29 16:04:26 +02001260 ieee80211_sta_set_buffered(sta, tidno, buffered);
1261 }
Felix Fietkau55195412011-04-17 23:28:09 +02001262}
1263
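/*
 * The station has woken up: flag each access category so the PS filter
 * is cleared for the next transmitted frame, and reschedule any TIDs
 * that still have frames queued in software.
 */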
1264void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1265{
1266 struct ath_atx_tid *tid;
1267 struct ath_atx_ac *ac;
1268 struct ath_txq *txq;
1269 int tidno;
1270
1271 for (tidno = 0, tid = &an->tid[tidno];
1272 tidno < WME_NUM_TID; tidno++, tid++) {
1273
1274 ac = tid->ac;
1275 txq = ac->txq;
1276
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001277 ath_txq_lock(sc, txq);
Felix Fietkau55195412011-04-17 23:28:09 +02001278 ac->clear_ps_filter = true;
1279
Felix Fietkau56dc6332011-08-28 00:32:22 +02001280 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
Felix Fietkau55195412011-04-17 23:28:09 +02001281 ath_tx_queue_tid(txq, tid);
1282 ath_txq_schedule(sc, txq);
1283 }
1284
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001285 ath_txq_unlock_complete(sc, txq);
Felix Fietkau55195412011-04-17 23:28:09 +02001286 }
1287}
1288
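/*
 * The ADDBA exchange has completed: derive the block-ack window size
 * from the peer's HT capabilities, mark the session as established and
 * unpause the TID.
 */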
Sujithe8324352009-01-16 21:38:42 +05301289void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1290{
1291 struct ath_atx_tid *txtid;
1292 struct ath_node *an;
1293
1294 an = (struct ath_node *)sta->drv_priv;
1295
Sujith Manoharan3d4e20f2012-03-14 14:40:58 +05301296 txtid = ATH_AN_2_TID(an, tid);
1297 txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1298 txtid->state |= AGGR_ADDBA_COMPLETE;
1299 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1300 ath_tx_resume_tid(sc, txtid);
Sujithe8324352009-01-16 21:38:42 +05301301}
1302
Sujithe8324352009-01-16 21:38:42 +05301303/********************/
1304/* Queue Management */
1305/********************/
1306
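/*
 * Drop every frame still sitting in the software queues of the TIDs
 * scheduled on this hardware queue, and take those TIDs off the
 * schedule.
 */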
Sujithe8324352009-01-16 21:38:42 +05301307static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1308 struct ath_txq *txq)
1309{
1310 struct ath_atx_ac *ac, *ac_tmp;
1311 struct ath_atx_tid *tid, *tid_tmp;
1312
1313 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1314 list_del(&ac->list);
1315 ac->sched = false;
1316 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1317 list_del(&tid->list);
1318 tid->sched = false;
1319 ath_tid_drain(sc, txq, tid);
1320 }
1321 }
1322}
1323
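/*
 * Allocate a hardware tx queue of the requested type and initialize the
 * corresponding ath_txq software state. Returns NULL if the hardware
 * cannot provide another queue of that type.
 */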
1324struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1325{
Sujithcbe61d82009-02-09 13:27:12 +05301326 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301327 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001328 static const int subtype_txq_to_hwq[] = {
1329 [WME_AC_BE] = ATH_TXQ_AC_BE,
1330 [WME_AC_BK] = ATH_TXQ_AC_BK,
1331 [WME_AC_VI] = ATH_TXQ_AC_VI,
1332 [WME_AC_VO] = ATH_TXQ_AC_VO,
1333 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001334 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301335
1336 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001337 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301338 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1339 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1340 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1341 qi.tqi_physCompBuf = 0;
1342
1343 /*
1344 * Enable interrupts only for EOL and DESC conditions.
1345 * We mark tx descriptors to receive a DESC interrupt
1346 * when a tx queue gets deep; otherwise we wait for the
1347 * EOL interrupt to reap descriptors. Note that this is done to
1348 * reduce interrupt load, and it only defers reaping
1349 * descriptors, never transmitting frames. Aside from
1350 * reducing interrupts this also permits more concurrency.
1351 * The only potential downside is if the tx queue backs
1352 * up, in which case the top half of the kernel may back up
1353 * due to a lack of tx descriptors.
1354 *
1355 * The UAPSD queue is an exception, since we take a desc-
1356 * based intr on the EOSP frames.
1357 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001358 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
Felix Fietkauce8fdf62012-03-14 16:40:22 +01001359 qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001360 } else {
1361 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1362 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1363 else
1364 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1365 TXQ_FLAG_TXDESCINT_ENABLE;
1366 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001367 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1368 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301369 /*
1370 * NB: don't print a message, this happens
1371 * normally on parts with too few tx queues
1372 */
1373 return NULL;
1374 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001375 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1376 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301377
Ben Greear60f2d1d2011-01-09 23:11:52 -08001378 txq->axq_qnum = axq_qnum;
1379 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301380 txq->axq_link = NULL;
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001381 __skb_queue_head_init(&txq->complete_q);
Sujithe8324352009-01-16 21:38:42 +05301382 INIT_LIST_HEAD(&txq->axq_q);
1383 INIT_LIST_HEAD(&txq->axq_acq);
1384 spin_lock_init(&txq->axq_lock);
1385 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001386 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001387 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001388 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001389
1390 txq->txq_headidx = txq->txq_tailidx = 0;
1391 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1392 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301393 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001394 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301395}
1396
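/*
 * Push updated queue parameters (AIFS, CWmin/CWmax, burst and ready
 * time) to an already-allocated hardware queue. Beacon queue parameters
 * are only cached here and applied later by the beacon code.
 */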
Sujithe8324352009-01-16 21:38:42 +05301397int ath_txq_update(struct ath_softc *sc, int qnum,
1398 struct ath9k_tx_queue_info *qinfo)
1399{
Sujithcbe61d82009-02-09 13:27:12 +05301400 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301401 int error = 0;
1402 struct ath9k_tx_queue_info qi;
1403
1404 if (qnum == sc->beacon.beaconq) {
1405 /*
1406 * XXX: for the beacon queue, we just save the parameters.
1407 * It will be picked up by ath_beaconq_config when
1408 * it's necessary.
1409 */
1410 sc->beacon.beacon_qi = *qinfo;
1411 return 0;
1412 }
1413
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001414 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301415
1416 ath9k_hw_get_txq_props(ah, qnum, &qi);
1417 qi.tqi_aifs = qinfo->tqi_aifs;
1418 qi.tqi_cwmin = qinfo->tqi_cwmin;
1419 qi.tqi_cwmax = qinfo->tqi_cwmax;
1420 qi.tqi_burstTime = qinfo->tqi_burstTime;
1421 qi.tqi_readyTime = qinfo->tqi_readyTime;
1422
1423 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001424 ath_err(ath9k_hw_common(sc->sc_ah),
1425 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301426 error = -EIO;
1427 } else {
1428 ath9k_hw_resettxqueue(ah, qnum);
1429 }
1430
1431 return error;
1432}
1433
1434int ath_cabq_update(struct ath_softc *sc)
1435{
1436 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001437 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301438 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301439
1440 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1441 /*
1442 * Ensure the readytime % is within the bounds.
1443 */
Sujith17d79042009-02-09 13:27:03 +05301444 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1445 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1446 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1447 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301448
Steve Brown9814f6b2011-02-07 17:10:39 -07001449 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301450 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301451 ath_txq_update(sc, qnum, &qi);
1452
1453 return 0;
1454}
1455
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001456static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1457{
1458 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1459 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1460}
1461
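/*
 * Complete every frame on the given descriptor list with a flush status,
 * returning any stale holding descriptors to the free buffer list.
 */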
Felix Fietkaufce041b2011-05-19 12:20:25 +02001462static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1463 struct list_head *list, bool retry_tx)
Sujithe8324352009-01-16 21:38:42 +05301464{
1465 struct ath_buf *bf, *lastbf;
1466 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001467 struct ath_tx_status ts;
1468
1469 memset(&ts, 0, sizeof(ts));
Felix Fietkaudaa5c402011-10-07 02:28:15 +02001470 ts.ts_status = ATH9K_TX_FLUSH;
Sujithe8324352009-01-16 21:38:42 +05301471 INIT_LIST_HEAD(&bf_head);
1472
Felix Fietkaufce041b2011-05-19 12:20:25 +02001473 while (!list_empty(list)) {
1474 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301475
Felix Fietkaufce041b2011-05-19 12:20:25 +02001476 if (bf->bf_stale) {
1477 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301478
Felix Fietkaufce041b2011-05-19 12:20:25 +02001479 ath_tx_return_buffer(sc, bf);
1480 continue;
Sujithe8324352009-01-16 21:38:42 +05301481 }
1482
1483 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001484 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001485
Sujithe8324352009-01-16 21:38:42 +05301486 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001487 if (bf_is_ampdu_not_probing(bf))
1488 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301489
1490 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001491 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1492 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301493 else
Felix Fietkau156369f2011-12-14 22:08:04 +01001494 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001495 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001496}
1497
1498/*
1499 * Drain a given TX queue (could be Beacon or Data)
1500 *
1501 * This assumes output has been stopped and
1502 * we do not need to block ath_tx_tasklet.
1503 */
1504void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1505{
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001506 ath_txq_lock(sc, txq);
1507
Felix Fietkaufce041b2011-05-19 12:20:25 +02001508 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1509 int idx = txq->txq_tailidx;
1510
1511 while (!list_empty(&txq->txq_fifo[idx])) {
1512 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1513 retry_tx);
1514
1515 INCR(idx, ATH_TXFIFO_DEPTH);
1516 }
1517 txq->txq_tailidx = idx;
1518 }
1519
1520 txq->axq_link = NULL;
1521 txq->axq_tx_inprogress = false;
1522 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001523
1524 /* flush any pending frames if aggregation is enabled */
Sujith Manoharan3d4e20f2012-03-14 14:40:58 +05301525 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
Felix Fietkaufce041b2011-05-19 12:20:25 +02001526 ath_txq_drain_pending_buffers(sc, txq);
1527
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001528 ath_txq_unlock_complete(sc, txq);
Sujithe8324352009-01-16 21:38:42 +05301529}
1530
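/*
 * Abort tx DMA and drain the hardware and software queues of every
 * initialized tx queue. Returns false if some hardware queues could not
 * be stopped cleanly.
 */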
Felix Fietkau080e1a22010-12-05 20:17:53 +01001531bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301532{
Sujithcbe61d82009-02-09 13:27:12 +05301533 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001534 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301535 struct ath_txq *txq;
Felix Fietkau34d25812011-10-07 02:28:12 +02001536 int i;
1537 u32 npend = 0;
Sujith043a0402009-01-16 21:38:47 +05301538
Sujith Manoharan781b14a2012-06-04 20:23:55 +05301539 if (test_bit(SC_OP_INVALID, &sc->sc_flags))
Felix Fietkau080e1a22010-12-05 20:17:53 +01001540 return true;
Sujith043a0402009-01-16 21:38:47 +05301541
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001542 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301543
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001544 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301545 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001546 if (!ATH_TXQ_SETUP(sc, i))
1547 continue;
1548
Felix Fietkau34d25812011-10-07 02:28:12 +02001549 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1550 npend |= BIT(i);
Sujith043a0402009-01-16 21:38:47 +05301551 }
1552
Felix Fietkau080e1a22010-12-05 20:17:53 +01001553 if (npend)
Felix Fietkau34d25812011-10-07 02:28:12 +02001554 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
Sujith043a0402009-01-16 21:38:47 +05301555
1556 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001557 if (!ATH_TXQ_SETUP(sc, i))
1558 continue;
1559
1560 /*
1561 * The caller will resume queues with ieee80211_wake_queues.
1562 * Mark the queue as not stopped to prevent ath_tx_complete
1563 * from waking the queue too early.
1564 */
1565 txq = &sc->tx.txq[i];
1566 txq->stopped = false;
1567 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301568 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001569
1570 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301571}
1572
Sujithe8324352009-01-16 21:38:42 +05301573void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1574{
1575 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1576 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1577}
1578
Ben Greear7755bad2011-01-18 17:30:00 -08001579/* For each axq_acq entry, for each tid, try to schedule packets
1580 * for transmission until ampdu_depth has reached the minimum queue depth.
1581 */
Sujithe8324352009-01-16 21:38:42 +05301582void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1583{
Ben Greear7755bad2011-01-18 17:30:00 -08001584 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1585 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301586
Felix Fietkau236de512011-09-03 01:40:25 +02001587 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001588 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301589 return;
1590
1591 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001592 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301593
Ben Greear7755bad2011-01-18 17:30:00 -08001594 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1595 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1596 list_del(&ac->list);
1597 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301598
Ben Greear7755bad2011-01-18 17:30:00 -08001599 while (!list_empty(&ac->tid_q)) {
1600 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1601 list);
1602 list_del(&tid->list);
1603 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301604
Ben Greear7755bad2011-01-18 17:30:00 -08001605 if (tid->paused)
1606 continue;
Sujithe8324352009-01-16 21:38:42 +05301607
Ben Greear7755bad2011-01-18 17:30:00 -08001608 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301609
Ben Greear7755bad2011-01-18 17:30:00 -08001610 /*
1611 * add tid to round-robin queue if more frames
1612 * are pending for the tid
1613 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001614 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001615 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301616
Ben Greear7755bad2011-01-18 17:30:00 -08001617 if (tid == last_tid ||
1618 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1619 break;
Sujithe8324352009-01-16 21:38:42 +05301620 }
Ben Greear7755bad2011-01-18 17:30:00 -08001621
Felix Fietkaub0477012011-12-14 22:08:05 +01001622 if (!list_empty(&ac->tid_q) && !ac->sched) {
1623 ac->sched = true;
1624 list_add_tail(&ac->list, &txq->axq_acq);
Ben Greear7755bad2011-01-18 17:30:00 -08001625 }
1626
1627 if (ac == last_ac ||
1628 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1629 return;
Sujithe8324352009-01-16 21:38:42 +05301630 }
1631}
1632
Sujithe8324352009-01-16 21:38:42 +05301633/***********/
1634/* TX, DMA */
1635/***********/
1636
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001637/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001638 * Insert a chain of ath_buf (descriptors) on a txq and
1639 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001640 */
Sujith102e0572008-10-29 10:15:16 +05301641static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001642 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001643{
Sujithcbe61d82009-02-09 13:27:12 +05301644 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001645 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001646 struct ath_buf *bf, *bf_last;
1647 bool puttxbuf = false;
1648 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301649
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001650 /*
1651 * Insert the frame on the outbound list and
1652 * pass it on to the hardware.
1653 */
1654
1655 if (list_empty(head))
1656 return;
1657
Felix Fietkaufce041b2011-05-19 12:20:25 +02001658 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001659 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001660 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001661
Joe Perchesd2182b62011-12-15 14:55:53 -08001662 ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
1663 txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001664
Felix Fietkaufce041b2011-05-19 12:20:25 +02001665 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1666 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001667 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001668 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001669 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001670 list_splice_tail_init(head, &txq->axq_q);
1671
Felix Fietkaufce041b2011-05-19 12:20:25 +02001672 if (txq->axq_link) {
1673 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perchesd2182b62011-12-15 14:55:53 -08001674 ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
Joe Perches226afe62010-12-02 19:12:37 -08001675 txq->axq_qnum, txq->axq_link,
1676 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001677 } else if (!edma)
1678 puttxbuf = true;
1679
1680 txq->axq_link = bf_last->bf_desc;
1681 }
1682
1683 if (puttxbuf) {
1684 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1685 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Joe Perchesd2182b62011-12-15 14:55:53 -08001686 ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
Felix Fietkaufce041b2011-05-19 12:20:25 +02001687 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1688 }
1689
1690 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001691 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001692 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001693 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001694
1695 if (!internal) {
1696 txq->axq_depth++;
1697 if (bf_is_ampdu_not_probing(bf))
1698 txq->axq_ampdu_depth++;
1699 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001700}
1701
Sujithe8324352009-01-16 21:38:42 +05301702static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001703 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301704{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001705 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001706 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001707 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301708
1709 /*
1710 * Do not queue to h/w when any of the following conditions is true:
1711 * - there are pending frames in the software queue
1712 * - the TID is currently paused for an ADDBA/BAR request
1713 * - the seqno is not within the block-ack window
1714 * - the h/w queue depth exceeds the low water mark
1715 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001716 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001717 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001718 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001719 /*
Sujithe8324352009-01-16 21:38:42 +05301720 * Add this frame to the software queue so it can be
1721 * scheduled later for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001722 */
Ben Greearbda8add2011-01-09 23:11:48 -08001723 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001724 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001725 if (!txctl->an || !txctl->an->sleeping)
1726 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301727 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001728 }
1729
Felix Fietkau81357a22012-05-24 14:32:20 +02001730 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001731 if (!bf)
1732 return;
1733
Felix Fietkau399c6482011-09-14 21:24:17 +02001734 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001735 INIT_LIST_HEAD(&bf_head);
1736 list_add(&bf->list, &bf_head);
1737
Sujithe8324352009-01-16 21:38:42 +05301738 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001739 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301740
1741 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001742 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301743 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001744 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001745 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301746}
1747
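/* Queue a single frame to the hardware directly, without aggregation. */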
Felix Fietkau82b873a2010-11-11 03:18:37 +01001748static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001749 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001750{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001751 struct ath_frame_info *fi = get_frame_info(skb);
1752 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301753 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001754
Felix Fietkau44f1d262011-08-28 00:32:25 +02001755 bf = fi->bf;
1756 if (!bf)
Felix Fietkau81357a22012-05-24 14:32:20 +02001757 bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001758
1759 if (!bf)
1760 return;
1761
1762 INIT_LIST_HEAD(&bf_head);
1763 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001764 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301765
Sujithd43f30152009-01-16 21:38:53 +05301766 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001767 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001768 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301769 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001770}
1771
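/*
 * Cache the hardware key index, key type and frame length in the
 * per-frame ath_frame_info so they are available when the tx descriptor
 * is filled in later.
 */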
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001772static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1773 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301774{
1775 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001776 struct ieee80211_sta *sta = tx_info->control.sta;
1777 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001778 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001779 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001780 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001781 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301782
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001783 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301784
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001785 if (sta)
1786 an = (struct ath_node *) sta->drv_priv;
1787
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001788 memset(fi, 0, sizeof(*fi));
1789 if (hw_key)
1790 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001791 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1792 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001793 else
1794 fi->keyix = ATH9K_TXKEYIX_INVALID;
1795 fi->keytype = keytype;
1796 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301797}
1798
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301799u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1800{
1801 struct ath_hw *ah = sc->sc_ah;
1802 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301803 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1804 (curchan->channelFlags & CHANNEL_5GHZ) &&
1805 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301806 return 0x3;
1807 else
1808 return chainmask;
1809}
1810
Felix Fietkau44f1d262011-08-28 00:32:25 +02001811/*
1812 * Assign a descriptor (and a sequence number if necessary)
1813 * and map the buffer for DMA. Frees the skb on error.
1814 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001815static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001816 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001817 struct ath_atx_tid *tid,
Felix Fietkau81357a22012-05-24 14:32:20 +02001818 struct sk_buff *skb,
1819 bool dequeue)
Sujithe8324352009-01-16 21:38:42 +05301820{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001821 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001822 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001823 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001824 struct ath_buf *bf;
Sujith Manoharanfd09c852012-04-17 08:34:50 +05301825 int fragno;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001826 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001827
1828 bf = ath_tx_get_buffer(sc);
1829 if (!bf) {
Joe Perchesd2182b62011-12-15 14:55:53 -08001830 ath_dbg(common, XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001831 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001832 }
Sujithe8324352009-01-16 21:38:42 +05301833
Sujithe8324352009-01-16 21:38:42 +05301834 ATH_TXBUF_RESET(bf);
1835
Felix Fietkaufa05f872011-08-28 00:32:24 +02001836 if (tid) {
Sujith Manoharanfd09c852012-04-17 08:34:50 +05301837 fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001838 seqno = tid->seq_next;
1839 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
Sujith Manoharanfd09c852012-04-17 08:34:50 +05301840
1841 if (fragno)
1842 hdr->seq_ctrl |= cpu_to_le16(fragno);
1843
1844 if (!ieee80211_has_morefrags(hdr->frame_control))
1845 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1846
Felix Fietkaufa05f872011-08-28 00:32:24 +02001847 bf->bf_state.seqno = seqno;
1848 }
1849
Sujithe8324352009-01-16 21:38:42 +05301850 bf->bf_mpdu = skb;
1851
Ben Greearc1739eb32010-10-14 12:45:29 -07001852 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1853 skb->len, DMA_TO_DEVICE);
1854 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301855 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001856 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001857 ath_err(ath9k_hw_common(sc->sc_ah),
1858 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001859 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001860 goto error;
Sujithe8324352009-01-16 21:38:42 +05301861 }
1862
Felix Fietkau56dc6332011-08-28 00:32:22 +02001863 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001864
1865 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001866
1867error:
Felix Fietkau81357a22012-05-24 14:32:20 +02001868 if (dequeue)
1869 __skb_unlink(skb, &tid->buf_q);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001870 dev_kfree_skb_any(skb);
1871 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001872}
1873
1874/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001875static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001876 struct ath_tx_control *txctl)
1877{
Felix Fietkau04caf862010-11-14 15:20:12 +01001878 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1879 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001880 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001881 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001882 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301883
Sujith Manoharan3d4e20f2012-03-14 14:40:58 +05301884 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301885 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001886 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1887 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001888 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001889
Felix Fietkau066dae92010-11-07 14:59:39 +01001890 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001891 }
1892
1893 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001894 /*
1895 * Try aggregation if it's a unicast data frame
1896 * and the destination is HT capable.
1897 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001898 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301899 } else {
Felix Fietkau81357a22012-05-24 14:32:20 +02001900 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001901 if (!bf)
Felix Fietkau3ad29522011-12-14 22:08:07 +01001902 return;
Felix Fietkau04caf862010-11-14 15:20:12 +01001903
Felix Fietkau82b873a2010-11-11 03:18:37 +01001904 bf->bf_state.bfs_paprd = txctl->paprd;
1905
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301906 if (txctl->paprd)
1907 bf->bf_state.bfs_paprd_timestamp = jiffies;
1908
Felix Fietkau44f1d262011-08-28 00:32:25 +02001909 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301910 }
Sujithe8324352009-01-16 21:38:42 +05301911}
1912
1913/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001914int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301915 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001916{
Felix Fietkau28d16702010-11-14 15:20:10 +01001917 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1918 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001919 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001920 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac58612011-01-24 19:23:18 +01001921 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001922 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001923 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001924 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001925 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001926
Ben Greeara9927ba2010-12-06 21:13:49 -08001927 /* NOTE: sta can be NULL according to net/mac80211.h */
1928 if (sta)
1929 txctl->an = (struct ath_node *)sta->drv_priv;
1930
Felix Fietkau04caf862010-11-14 15:20:12 +01001931 if (info->control.hw_key)
1932 frmlen += info->control.hw_key->icv_len;
1933
Felix Fietkau28d16702010-11-14 15:20:10 +01001934 /*
1935 * As a temporary workaround, assign seq# here; this will likely need
1936 * to be cleaned up to work better with Beacon transmission and virtual
1937 * BSSes.
1938 */
1939 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1940 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1941 sc->tx.seq_no += 0x10;
1942 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1943 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1944 }
1945
John W. Linville42cecc32011-09-19 15:42:31 -04001946 /* Add the padding after the header if this is not already done */
1947 padpos = ath9k_cmn_padpos(hdr->frame_control);
1948 padsize = padpos & 3;
1949 if (padsize && skb->len > padpos) {
1950 if (skb_headroom(skb) < padsize)
1951 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001952
John W. Linville42cecc32011-09-19 15:42:31 -04001953 skb_push(skb, padsize);
1954 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc4a2011-09-15 10:03:12 +02001955 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001956 }
1957
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001958 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1959 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1960 !ieee80211_is_data(hdr->frame_control))
1961 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1962
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001963 setup_frame_info(hw, skb, frmlen);
1964
1965 /*
1966 * At this point, the vif, hw_key and sta pointers in the tx control
1967 * info are no longer valid (overwritten by the ath_frame_info data).
1968 */
1969
Felix Fietkau066dae92010-11-07 14:59:39 +01001970 q = skb_get_queue_mapping(skb);
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001971
1972 ath_txq_lock(sc, txq);
Felix Fietkau066dae92010-11-07 14:59:39 +01001973 if (txq == sc->tx.txq_map[q] &&
1974 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001975 ieee80211_stop_queue(sc->hw, q);
Rusty Russell3db1cd52011-12-19 13:56:45 +00001976 txq->stopped = true;
Felix Fietkau97923b12010-06-12 00:33:55 -04001977 }
Felix Fietkau97923b12010-06-12 00:33:55 -04001978
Felix Fietkau44f1d262011-08-28 00:32:25 +02001979 ath_tx_start_dma(sc, skb, txctl);
Felix Fietkau3ad29522011-12-14 22:08:07 +01001980
Felix Fietkau23de5dc2011-12-19 16:45:54 +01001981 ath_txq_unlock(sc, txq);
Felix Fietkau3ad29522011-12-14 22:08:07 +01001982
Felix Fietkau44f1d262011-08-28 00:32:25 +02001983 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001984}
1985
Sujithe8324352009-01-16 21:38:42 +05301986/*****************/
1987/* TX Completion */
1988/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001989
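/*
 * Prepare a completed frame for handoff to mac80211: strip the
 * driver-added header padding, update the power-save state, wake the
 * mac80211 queue if it was stopped for backpressure and queue the skb
 * for status reporting.
 */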
Sujithe8324352009-01-16 21:38:42 +05301990static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301991 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001992{
Sujithe8324352009-01-16 21:38:42 +05301993 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001994 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001995 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001996 int q, padpos, padsize;
Sujith Manoharan07c15a32012-06-04 20:24:07 +05301997 unsigned long flags;
Sujithe8324352009-01-16 21:38:42 +05301998
Joe Perchesd2182b62011-12-15 14:55:53 -08001999 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05302000
Felix Fietkau55797b12011-09-14 21:24:16 +02002001 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05302002 /* Frame was ACKed */
2003 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05302004
John W. Linville42cecc32011-09-19 15:42:31 -04002005 padpos = ath9k_cmn_padpos(hdr->frame_control);
2006 padsize = padpos & 3;
2007 if (padsize && skb->len>padpos+padsize) {
2008 /*
2009 * Remove MAC header padding before giving the frame back to
2010 * mac80211.
2011 */
2012 memmove(skb->data + padsize, skb->data, padpos);
2013 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05302014 }
2015
Sujith Manoharan07c15a32012-06-04 20:24:07 +05302016 spin_lock_irqsave(&sc->sc_pm_lock, flags);
Felix Fietkauc8e88682011-11-16 13:08:40 +01002017 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
Sujith1b04b932010-01-08 10:36:05 +05302018 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perchesd2182b62011-12-15 14:55:53 -08002019 ath_dbg(common, PS,
Joe Perches226afe62010-12-02 19:12:37 -08002020 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05302021 sc->ps_flags & (PS_WAIT_FOR_BEACON |
2022 PS_WAIT_FOR_CAB |
2023 PS_WAIT_FOR_PSPOLL_DATA |
2024 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03002025 }
Sujith Manoharan07c15a32012-06-04 20:24:07 +05302026 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03002027
Felix Fietkau7545daf2011-01-24 19:23:16 +01002028 q = skb_get_queue_mapping(skb);
2029 if (txq == sc->tx.txq_map[q]) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01002030 if (WARN_ON(--txq->pending_frames < 0))
2031 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01002032
Felix Fietkau7545daf2011-01-24 19:23:16 +01002033 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
2034 ieee80211_wake_queue(sc->hw, q);
Rusty Russell3db1cd52011-12-19 13:56:45 +00002035 txq->stopped = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002036 }
Felix Fietkau97923b12010-06-12 00:33:55 -04002037 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01002038
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002039 __skb_queue_tail(&txq->complete_q, skb);
Sujithe8324352009-01-16 21:38:42 +05302040}
2041
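/*
 * Unmap the frame's DMA buffer, complete the skb (or the PAPRD
 * calibration frame) and return the ath_buf chain to the free list.
 */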
2042static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002043 struct ath_txq *txq, struct list_head *bf_q,
Felix Fietkau156369f2011-12-14 22:08:04 +01002044 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05302045{
2046 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002047 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05302048 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302049 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05302050
Felix Fietkau55797b12011-09-14 21:24:16 +02002051 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302052 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05302053
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002054 if (ts->ts_status & ATH9K_TXERR_FILT)
2055 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2056
Ben Greearc1739eb32010-10-14 12:45:29 -07002057 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002058 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002059
2060 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302061 if (time_after(jiffies,
2062 bf->bf_state.bfs_paprd_timestamp +
2063 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002064 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002065 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002066 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002067 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002068 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302069 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002070 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002071 /* At this point, skb (bf->bf_mpdu) is consumed; make sure we don't
2072 * accidentally reference it later.
2073 */
2074 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302075
2076 /*
2077 * Return the list of ath_bufs for this mpdu to the free queue
2078 */
2079 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2080 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2081 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2082}
2083
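/*
 * Translate the hardware tx status into the rate-control feedback that
 * mac80211 expects: ack signal, A-MPDU subframe counts and per-rate
 * retry counts, with special handling for FIFO underruns.
 */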
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002084static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2085 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002086 int txok)
Sujithc4288392008-11-18 09:09:30 +05302087{
Sujitha22be222009-03-30 15:28:36 +05302088 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302089 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302090 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002091 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002092 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302093 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302094
Sujith95e4acb2009-03-13 08:56:09 +05302095 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002096 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302097
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002098 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302099 WARN_ON(tx_rateindex >= hw->max_rates);
2100
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002101 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002102 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302103
Felix Fietkaub572d032010-11-14 15:20:07 +01002104 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002105 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302106 tx_info->status.ampdu_len = nframes;
2107 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002108
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002109 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002110 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002111 /*
2112 * If an underrun error is seen, treat it as an excessive
2113 * retry only if the max frame trigger level has been reached
2114 * (2 KB for single stream, and 4 KB for dual stream).
2115 * Adjust the long retry as if the frame was tried
2116 * hw->max_rate_tries times so that rate control updates the
2117 * PER for the failed rate accordingly.
2118 * In case of congestion on the bus, penalizing this type of
2119 * underrun should help the hardware actually transmit new frames
2120 * successfully by eventually preferring slower rates.
2121 * This itself should also alleviate congestion on the bus.
2122 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002123 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2124 ATH9K_TX_DELIM_UNDERRUN)) &&
2125 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002126 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002127 tx_info->status.rates[tx_rateindex].count =
2128 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302129 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302130
Felix Fietkau545750d2009-11-23 22:21:01 +01002131 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302132 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002133 tx_info->status.rates[i].idx = -1;
2134 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302135
Felix Fietkau78c46532010-06-25 01:26:16 +02002136 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302137}
2138
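/*
 * Common completion path for legacy and EDMA hardware: update the queue
 * depth counters and hand the frame(s) to the aggregate or single-frame
 * completion handler.
 */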
Felix Fietkaufce041b2011-05-19 12:20:25 +02002139static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2140 struct ath_tx_status *ts, struct ath_buf *bf,
2141 struct list_head *bf_head)
2142{
2143 int txok;
2144
2145 txq->axq_depth--;
2146 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2147 txq->axq_tx_inprogress = false;
2148 if (bf_is_ampdu_not_probing(bf))
2149 txq->axq_ampdu_depth--;
2150
Felix Fietkaufce041b2011-05-19 12:20:25 +02002151 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002152 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkau156369f2011-12-14 22:08:04 +01002153 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002154 } else
2155 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2156
Sujith Manoharan3d4e20f2012-03-14 14:40:58 +05302157 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002158 ath_txq_schedule(sc, txq);
2159}
2160
Sujithc4288392008-11-18 09:09:30 +05302161static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002162{
Sujithcbe61d82009-02-09 13:27:12 +05302163 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002164 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002165 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2166 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302167 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002168 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002169 int status;
2170
Joe Perchesd2182b62011-12-15 14:55:53 -08002171 ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
Joe Perches226afe62010-12-02 19:12:37 -08002172 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2173 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002175 ath_txq_lock(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002176 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002177 if (work_pending(&sc->hw_reset_work))
2178 break;
2179
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002180 if (list_empty(&txq->axq_q)) {
2181 txq->axq_link = NULL;
Sujith Manoharan3d4e20f2012-03-14 14:40:58 +05302182 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
Ben Greear082f6532011-01-09 23:11:47 -08002183 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002184 break;
2185 }
2186 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2187
2188 /*
2189 * There is a race condition in which a BH gets scheduled
2190 * after sw writes TxE and before hw re-loads the last
2191 * descriptor to get the newly chained one.
2192 * Software must keep the last DONE descriptor as a
2193 * holding descriptor - software does so by marking
2194 * it with the STALE flag.
2195 */
2196 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302197 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002198 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002199 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002200 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002201
2202 bf = list_entry(bf_held->list.next, struct ath_buf,
2203 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002204 }
2205
2206 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302207 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002208
Felix Fietkau29bffa92010-03-29 20:14:23 -07002209 memset(&ts, 0, sizeof(ts));
2210 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002211 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002212 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002213
Ben Greear2dac4fb2011-01-09 23:11:45 -08002214 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002215
2216 /*
2217 * Remove the ath_bufs of the same transmit unit from the txq,
2218 * but leave the last descriptor behind as the holding
2219 * descriptor for hw.
2220 */
Sujitha119cc42009-03-30 15:28:38 +05302221 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002222 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002223 if (!list_is_singular(&lastbf->list))
2224 list_cut_position(&bf_head,
2225 &txq->axq_q, lastbf->list.prev);
2226
Felix Fietkaufce041b2011-05-19 12:20:25 +02002227 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002228 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002229 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002230 }
Johannes Berge6a98542008-10-21 12:40:02 +02002231
Felix Fietkaufce041b2011-05-19 12:20:25 +02002232 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002233 }
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002234 ath_txq_unlock_complete(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002235}
2236
Sujithe8324352009-01-16 21:38:42 +05302237void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002238{
Felix Fietkau239c7952012-03-14 16:40:26 +01002239 struct ath_hw *ah = sc->sc_ah;
2240 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
Sujithe8324352009-01-16 21:38:42 +05302241 int i;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002242
2243 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302244 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2245 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002246 }
2247}
2248
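/*
 * Tx completion handler for EDMA hardware, which reports completions
 * through a separate tx status ring rather than writing status back
 * into the descriptors themselves.
 */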
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002249void ath_tx_edma_tasklet(struct ath_softc *sc)
2250{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002251 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002252 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2253 struct ath_hw *ah = sc->sc_ah;
2254 struct ath_txq *txq;
2255 struct ath_buf *bf, *lastbf;
2256 struct list_head bf_head;
2257 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002258
2259 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002260 if (work_pending(&sc->hw_reset_work))
2261 break;
2262
Felix Fietkaufce041b2011-05-19 12:20:25 +02002263 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002264 if (status == -EINPROGRESS)
2265 break;
2266 if (status == -EIO) {
Joe Perchesd2182b62011-12-15 14:55:53 -08002267 ath_dbg(common, XMIT, "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002268 break;
2269 }
2270
Felix Fietkau4e0ad252012-02-27 19:58:42 +01002271 /* Process beacon completions separately */
2272 if (ts.qid == sc->beacon.beaconq) {
2273 sc->beacon.tx_processed = true;
2274 sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002275 continue;
Felix Fietkau4e0ad252012-02-27 19:58:42 +01002276 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002277
Felix Fietkaufce041b2011-05-19 12:20:25 +02002278 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002279
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002280 ath_txq_lock(sc, txq);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002281
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002282 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002283 ath_txq_unlock(sc, txq);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002284 return;
2285 }
2286
2287 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2288 struct ath_buf, list);
2289 lastbf = bf->bf_lastbf;
2290
2291 INIT_LIST_HEAD(&bf_head);
2292 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2293 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002294
Felix Fietkaufce041b2011-05-19 12:20:25 +02002295 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2296 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002297
Felix Fietkaufce041b2011-05-19 12:20:25 +02002298 if (!list_empty(&txq->axq_q)) {
2299 struct list_head bf_q;
2300
2301 INIT_LIST_HEAD(&bf_q);
2302 txq->axq_link = NULL;
2303 list_splice_tail_init(&txq->axq_q, &bf_q);
2304 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2305 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002306 }
2307
Felix Fietkaufce041b2011-05-19 12:20:25 +02002308 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Felix Fietkau23de5dc2011-12-19 16:45:54 +01002309 ath_txq_unlock_complete(sc, txq);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002310 }
2311}
2312
Sujithe8324352009-01-16 21:38:42 +05302313/*****************/
2314/* Init, Cleanup */
2315/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002316
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002317static int ath_txstatus_setup(struct ath_softc *sc, int size)
2318{
2319 struct ath_descdma *dd = &sc->txsdma;
2320 u8 txs_len = sc->sc_ah->caps.txs_len;
2321
2322 dd->dd_desc_len = size * txs_len;
2323 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2324 &dd->dd_desc_paddr, GFP_KERNEL);
2325 if (!dd->dd_desc)
2326 return -ENOMEM;
2327
2328 return 0;
2329}
2330
2331static int ath_tx_edma_init(struct ath_softc *sc)
2332{
2333 int err;
2334
2335 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2336 if (!err)
2337 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2338 sc->txsdma.dd_desc_paddr,
2339 ATH_TXSTATUS_RING_SIZE);
2340
2341 return err;
2342}
2343
2344static void ath_tx_edma_cleanup(struct ath_softc *sc)
2345{
2346 struct ath_descdma *dd = &sc->txsdma;
2347
2348 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2349 dd->dd_desc_paddr);
2350}
2351
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

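/*
 * Undo ath_tx_init(): free the beacon and TX descriptor pools (only if
 * they were actually allocated) and the EDMA status ring, so this is safe
 * to call from the error path above on a partially initialized state.
 */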
void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

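/*
 * Initialize per-station aggregation state: reset every TID's sequence
 * numbers, block-ack window and scheduling flags, and map each access
 * category to its hardware TX queue.
 */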
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		__skb_queue_head_init(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

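/*
 * Tear down per-station TX state: under the queue lock, unschedule each
 * TID and its access category, drain any frames still pending on the TID
 * and clear the ADDBA/cleanup aggregation flags.
 */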
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		ath_txq_unlock(sc, txq);
	}
}