blob: 3bfc3b90f2569013092e82959a03bfebdf39b979 [file] [log] [blame]
/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
16
#include "core.h"
18
#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
/* Extract the MCS index / spatial-stream count from an HT ratecode. */
#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
/* Preamble / training field durations used by ath_pkt_duration(), in us. */
#define L_STF 8
#define L_LTF 8
#define L_SIG 4
#define HT_SIG 8
#define HT_STF 4
#define HT_LTF(_ns) (4 * (_ns))
#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME 16

/*
 * Data bits carried per OFDM symbol, indexed as
 * bits_per_symbol[MCS][0: 20 MHz, 1: 40 MHz].  Rows 0-7 and 8-15
 * mirror the one- and two-stream HT MCS sets respectively.
 */
static u32 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{ 26, 54 }, /* 0: BPSK */
	{ 52, 108 }, /* 1: QPSK 1/2 */
	{ 78, 162 }, /* 2: QPSK 3/4 */
	{ 104, 216 }, /* 3: 16-QAM 1/2 */
	{ 156, 324 }, /* 4: 16-QAM 3/4 */
	{ 208, 432 }, /* 5: 64-QAM 2/3 */
	{ 234, 486 }, /* 6: 64-QAM 3/4 */
	{ 260, 540 }, /* 7: 64-QAM 5/6 */
	{ 52, 108 }, /* 8: BPSK */
	{ 104, 216 }, /* 9: QPSK 1/2 */
	{ 156, 324 }, /* 10: QPSK 3/4 */
	{ 208, 432 }, /* 11: 16-QAM 1/2 */
	{ 312, 648 }, /* 12: 16-QAM 3/4 */
	{ 416, 864 }, /* 13: 64-QAM 2/3 */
	{ 468, 972 }, /* 14: 64-QAM 3/4 */
	{ 520, 1080 }, /* 15: 64-QAM 5/6 */
};

/* Ratecodes with bit 0x80 set are HT (MCS) rates; see ath_pkt_duration(). */
#define IS_HT_RATE(_rate) ((_rate) & 0x80)
57
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 * NB: must be called with txq lock held
 */
63
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	/* First buffer of the chain: its DMA address seeds TXDP if idle. */
	bf = list_first_entry(head, struct ath_buf, list);

	list_splice_tail_init(head, &txq->axq_q);
	txq->axq_depth++;
	txq->axq_totalqueued++;
	/* Track the tail buffer of the software queue after the splice. */
	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);

	DPRINTF(sc, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (txq->axq_link == NULL) {
		/* Hardware queue empty: point TXDP at the new chain. */
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		DPRINTF(sc, ATH_DBG_XMIT,
			"TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		/* Otherwise append by patching the previous ds_link pointer. */
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
			txq->axq_qnum, txq->axq_link,
			ito64(bf->bf_daddr), bf->bf_desc);
	}
	/* Remember where the next chain must be linked, then kick the DMA. */
	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
	ath9k_hw_txstart(ah, txq->axq_qnum);
}
102
/*
 * Report TX status of a single frame back to mac80211: translate driver
 * status flags into IEEE80211_TX_STAT_* bits, undo the MAC-header padding
 * that was added on transmit, and hand the skb to ieee80211_tx_status().
 */
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_xmit_status *tx_status)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
	int hdrlen, padsize;

	DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	/*
	 * No-ACK and filtered frames release their driver-private rate data
	 * here.  NOTE(review): all other frames leave tx_info_priv alive in
	 * this path -- presumably the rate-control code owns/frees it then;
	 * confirm there is no leak for the normal ACKed case.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
	    tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
		kfree(tx_info_priv);
		tx_info->rate_driver_data[0] = NULL;
	}

	/* Propagate a pending BAR indication and consume the flag. */
	if (tx_status->flags & ATH_TX_BAR) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
		tx_status->flags &= ~ATH_TX_BAR;
	}

	if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	tx_info->status.rates[0].count = tx_status->retries;
	if (tx_info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
		/* Change idx from internal table index to MCS index */
		int idx = tx_info->status.rates[0].idx;
		struct ath_rate_table *rate_table = sc->cur_rate_table;
		if (idx >= 0 && idx < rate_table->rate_cnt)
			tx_info->status.rates[0].idx =
				rate_table->info[idx].ratecode & 0x7f;
	}

	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	padsize = hdrlen & 3;
	if (padsize && hdrlen >= 24) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, hdrlen);
		skb_pull(skb, padsize);
	}

	ieee80211_tx_status(hw, skb);
}
152
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700153/* Check if it's okay to send out aggregates */
154
Sujitha37c2c72008-10-29 10:15:40 +0530155static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700156{
157 struct ath_atx_tid *tid;
158 tid = ATH_AN_2_TID(an, tidno);
159
Sujitha37c2c72008-10-29 10:15:40 +0530160 if (tid->state & AGGR_ADDBA_COMPLETE ||
161 tid->state & AGGR_ADDBA_PROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700162 return 1;
163 else
164 return 0;
165}
166
/*
 * Fill in beacon configuration for the given interface.  The if_id
 * parameter is currently unused; all values except the beacon interval
 * are fixed driver defaults.
 */
static void ath_get_beaconconfig(struct ath_softc *sc, int if_id,
				 struct ath_beacon_config *conf)
{
	struct ieee80211_hw *hw = sc->hw;

	/* fill in beacon config data */

	conf->beacon_interval = hw->conf.beacon_int;
	conf->listen_interval = 100;	/* hard-coded driver default */
	conf->dtim_count = 1;
	conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
}
179
Sujith528f0c62008-10-29 10:14:26 +0530180/* Calculate Atheros packet type from IEEE80211 packet header */
181
182static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700183{
Sujith528f0c62008-10-29 10:14:26 +0530184 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700185 enum ath9k_pkt_type htype;
186 __le16 fc;
187
Sujith528f0c62008-10-29 10:14:26 +0530188 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700189 fc = hdr->frame_control;
190
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700191 if (ieee80211_is_beacon(fc))
192 htype = ATH9K_PKT_TYPE_BEACON;
193 else if (ieee80211_is_probe_resp(fc))
194 htype = ATH9K_PKT_TYPE_PROBE_RESP;
195 else if (ieee80211_is_atim(fc))
196 htype = ATH9K_PKT_TYPE_ATIM;
197 else if (ieee80211_is_pspoll(fc))
198 htype = ATH9K_PKT_TYPE_PSPOLL;
199 else
200 htype = ATH9K_PKT_TYPE_NORMAL;
201
202 return htype;
203}
204
Sujitha8efee42008-11-18 09:07:30 +0530205static bool is_pae(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700206{
207 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700208 __le16 fc;
209
210 hdr = (struct ieee80211_hdr *)skb->data;
211 fc = hdr->frame_control;
Johannes Berge6a98542008-10-21 12:40:02 +0200212
Sujitha8efee42008-11-18 09:07:30 +0530213 if (ieee80211_is_data(fc)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700214 if (ieee80211_is_nullfunc(fc) ||
Sujith528f0c62008-10-29 10:14:26 +0530215 /* Port Access Entity (IEEE 802.1X) */
216 (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
Sujitha8efee42008-11-18 09:07:30 +0530217 return true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700218 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700219 }
220
Sujitha8efee42008-11-18 09:07:30 +0530221 return false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700222}
223
Sujith528f0c62008-10-29 10:14:26 +0530224static int get_hw_crypto_keytype(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700225{
Sujith528f0c62008-10-29 10:14:26 +0530226 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
227
228 if (tx_info->control.hw_key) {
229 if (tx_info->control.hw_key->alg == ALG_WEP)
230 return ATH9K_KEY_TYPE_WEP;
231 else if (tx_info->control.hw_key->alg == ALG_TKIP)
232 return ATH9K_KEY_TYPE_TKIP;
233 else if (tx_info->control.hw_key->alg == ALG_CCMP)
234 return ATH9K_KEY_TYPE_AES;
235 }
236
237 return ATH9K_KEY_TYPE_CLEAR;
238}
239
Sujith528f0c62008-10-29 10:14:26 +0530240/* Called only when tx aggregation is enabled and HT is supported */
241
/*
 * Record the TID of this frame in the ath_buf and stamp the frame with
 * the TID's next aggregation sequence number.  Only meaningful when tx
 * aggregation is enabled and the destination station is known.
 */
static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	/* Without a station context there is no per-node TID state. */
	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* Get tidno */

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/* Get seqno */

	if (ieee80211_is_data(fc) && !is_pae(skb)) {
		/* For HT capable stations, we save tidno for later use.
		 * We also override seqno set by upper layer with the one
		 * in tx aggregation state.
		 *
		 * If fragmentation is on, the sequence number is
		 * not overridden, since it has been
		 * incremented by the fragmentation routine.
		 *
		 * FIXME: check if the fragmentation threshold exceeds
		 * IEEE80211 max.
		 */
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
					    IEEE80211_SEQ_SEQ_SHIFT);
		bf->bf_seqno = tid->seq_next;
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}
}
287
288static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
289 struct ath_txq *txq)
290{
291 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
292 int flags = 0;
293
294 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
295 flags |= ATH9K_TXDESC_INTREQ;
296
297 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
298 flags |= ATH9K_TXDESC_NOACK;
299 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
300 flags |= ATH9K_TXDESC_RTSENA;
301
302 return flags;
303}
304
305static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
306{
307 struct ath_buf *bf = NULL;
308
Sujithb77f4832008-12-07 21:44:03 +0530309 spin_lock_bh(&sc->tx.txbuflock);
Sujith528f0c62008-10-29 10:14:26 +0530310
Sujithb77f4832008-12-07 21:44:03 +0530311 if (unlikely(list_empty(&sc->tx.txbuf))) {
312 spin_unlock_bh(&sc->tx.txbuflock);
Sujith528f0c62008-10-29 10:14:26 +0530313 return NULL;
314 }
315
Sujithb77f4832008-12-07 21:44:03 +0530316 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
Sujith528f0c62008-10-29 10:14:26 +0530317 list_del(&bf->list);
318
Sujithb77f4832008-12-07 21:44:03 +0530319 spin_unlock_bh(&sc->tx.txbuflock);
Sujith528f0c62008-10-29 10:14:26 +0530320
321 return bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700322}
323
324/* To complete a chain of buffers associated a frame */
325
/*
 * Complete one MPDU: build the xmit status, unmap the DMA buffer, report
 * the frame to mac80211 via ath_tx_complete(), and recycle the ath_buf
 * chain back onto the free list.
 */
static void ath_tx_complete_buf(struct ath_softc *sc,
				struct ath_buf *bf,
				struct list_head *bf_q,
				int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ath_xmit_status tx_status;
	unsigned long flags;

	/*
	 * Set retry information.
	 * NB: Don't use the information in the descriptor, because the frame
	 * could be software retried.
	 */
	tx_status.retries = bf->bf_retries;
	tx_status.flags = 0;

	if (sendbar)
		tx_status.flags = ATH_TX_BAR;

	if (!txok) {
		tx_status.flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_status.flags |= ATH_TX_XRETRY;
	}

	/* Unmap this frame */
	pci_unmap_single(sc->pdev,
			 bf->bf_dmacontext,
			 skb->len,
			 PCI_DMA_TODEVICE);
	/* complete this frame */
	ath_tx_complete(sc, skb, &tx_status);

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
368
369/*
370 * queue up a dest/ac pair for tx scheduling
371 * NB: must be called with txq lock held
372 */
373
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	/*
	 * if tid is paused, hold off
	 */
	if (tid->paused)
		return;

	/*
	 * add tid to ac atmost once.  NB: if the tid is already scheduled we
	 * return here WITHOUT touching the ac -- that early return is part of
	 * the scheduling contract, not an optimization.
	 */
	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	/*
	 * add node ac to txq atmost once
	 */
	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
402
403/* pause a tid */
404
405static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
406{
Sujithb77f4832008-12-07 21:44:03 +0530407 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700408
409 spin_lock_bh(&txq->axq_lock);
410
411 tid->paused++;
412
413 spin_unlock_bh(&txq->axq_lock);
414}
415
416/* resume a tid and schedule aggregate */
417
418void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
419{
Sujithb77f4832008-12-07 21:44:03 +0530420 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700421
422 ASSERT(tid->paused > 0);
423 spin_lock_bh(&txq->axq_lock);
424
425 tid->paused--;
426
427 if (tid->paused > 0)
428 goto unlock;
429
430 if (list_empty(&tid->buf_q))
431 goto unlock;
432
433 /*
434 * Add this TID to scheduler and try to send out aggregates
435 */
436 ath_tx_queue_tid(txq, tid);
437 ath_txq_schedule(sc, txq);
438unlock:
439 spin_unlock_bh(&txq->axq_lock);
440}
441
442/* Compute the number of bad frames */
443
Sujithb5aa9bf2008-10-29 10:13:31 +0530444static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
445 int txok)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700446{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700447 struct ath_buf *bf_last = bf->bf_lastbf;
448 struct ath_desc *ds = bf_last->bf_desc;
449 u16 seq_st = 0;
450 u32 ba[WME_BA_BMP_SIZE >> 5];
451 int ba_index;
452 int nbad = 0;
453 int isaggr = 0;
454
Sujithb5aa9bf2008-10-29 10:13:31 +0530455 if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700456 return 0;
457
Sujithcd3d39a2008-08-11 14:03:34 +0530458 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700459 if (isaggr) {
460 seq_st = ATH_DS_BA_SEQ(ds);
461 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
462 }
463
464 while (bf) {
465 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
466 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
467 nbad++;
468
469 bf = bf->bf_next;
470 }
471
472 return nbad;
473}
474
475static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
476{
477 struct sk_buff *skb;
478 struct ieee80211_hdr *hdr;
479
Sujithcd3d39a2008-08-11 14:03:34 +0530480 bf->bf_state.bf_type |= BUF_RETRY;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700481 bf->bf_retries++;
482
483 skb = bf->bf_mpdu;
484 hdr = (struct ieee80211_hdr *)skb->data;
485 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
486}
487
488/* Update block ack window */
489
/*
 * Mark a sequence number as completed in the TID's block-ack window and
 * slide the window start past every leading completed slot.
 */
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	/* Offset of this sequence number from the window start... */
	index = ATH_BA_INDEX(tid->seq_start, seqno);
	/* ...turned into a circular index into tid->tx_buf[]. */
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	/* Advance seq_start/baw_head over contiguous completed entries. */
	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}
505
506/*
507 * ath_pkt_dur - compute packet duration (NB: not NAV)
508 *
509 * rix - rate index
510 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
511 * width - 0 for 20 MHz, 1 for 40 MHz
512 * half_gi - to use 4us v/s 3.6 us for symbol time
513 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	struct ath_rate_table *rate_table = sc->cur_rate_table;
	u32 nbits, nsymbits, duration, nsymbols;
	u8 rc;
	int streams, pktlen;

	/* Aggregates use the accumulated length; single frames their own. */
	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
	rc = rate_table->info[rix].ratecode;

	/* for legacy rates, use old function to compute packet duration */
	if (!IS_HT_RATE(rc))
		return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
					      rix, shortPreamble);

	/* find number of symbols: PLCP + data (rounded up to whole symbols) */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);	/* 4 us per symbol */
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);	/* 3.6 us per symbol */

	/* addup duration for legacy/ht training and signal fields */
	streams = HT_RC_2_STREAMS(rc);
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
546
547/* Rate module function to set rate related fields in tx descriptor */
548
/*
 * Fill the descriptor's 4-entry rate series from mac80211's rate-control
 * choices, deciding RTS/CTS protection and computing per-series packet
 * durations, then program the scenario into the hardware descriptors.
 */
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_rate_table *rt;
	struct ath_desc *ds = bf->bf_desc;
	struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ieee80211_hdr *hdr;
	int i, flags, rtsctsena = 0;
	u32 ctsduration = 0;
	u8 rix = 0, cix, ctsrate = 0;
	__le16 fc;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = (struct sk_buff *)bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/* Fragmented frames: use only the first rate, at maximum tries. */
	if (ieee80211_has_morefrags(fc) ||
	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
		rates[1].count = rates[2].count = rates[3].count = 0;
		rates[1].idx = rates[2].idx = rates[3].idx = 0;
		rates[0].count = ATH_TXMAXTRY;
	}

	/* get the cix for the lowest valid rix */
	rt = sc->cur_rate_table;
	for (i = 3; i >= 0; i--) {
		if (rates[i].count && (rates[i].idx >= 0)) {
			rix = rates[i].idx;
			break;
		}
	}

	flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
	cix = rt->info[rix].ctrl_rate;

	/*
	 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
	 * just CTS. Note that this is only done for OFDM/HT unicast frames.
	 */
	if (sc->sc_protmode != PROT_M_NONE && !(bf->bf_flags & ATH9K_TXDESC_NOACK)
	    && (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
		WLAN_RC_PHY_HT(rt->info[rix].phy))) {
		if (sc->sc_protmode == PROT_M_RTSCTS)
			flags = ATH9K_TXDESC_RTSENA;
		else if (sc->sc_protmode == PROT_M_CTSONLY)
			flags = ATH9K_TXDESC_CTSENA;

		cix = rt->info[sc->sc_protrix].ctrl_rate;
		rtsctsena = 1;
	}

	/* For 11n, the default behavior is to enable RTS for hw retried frames.
	 * We enable the global flag here and let rate series flags determine
	 * which rates will actually use RTS.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
		/* 802.11g protection not needed, use our default behavior */
		if (!rtsctsena)
			flags = ATH9K_TXDESC_RTSENA;
	}

	/* Set protection if aggregate protection on */
	if (sc->sc_config.ath_aggr_prot &&
	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
		flags = ATH9K_TXDESC_RTSENA;
		cix = rt->info[sc->sc_protrix].ctrl_rate;
		rtsctsena = 1;
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
		flags &= ~(ATH9K_TXDESC_RTSENA);

	/*
	 * CTS transmit rate is derived from the transmit rate by looking in the
	 * h/w rate table. We must also factor in whether or not a short
	 * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
	 */
	ctsrate = rt->info[cix].ratecode |
		(bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);

	/* Populate each valid rate series with rate, tries, flags, duration. */
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;

		series[i].Rate = rt->info[rix].ratecode |
			(bf_isshpreamble(bf) ? rt->info[rix].short_preamble : 0);

		series[i].Tries = rates[i].count;

		series[i].RateFlags = (
			(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ?
				ATH9K_RATESERIES_RTS_CTS : 0) |
			((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
				ATH9K_RATESERIES_2040 : 0) |
			((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ?
				ATH9K_RATESERIES_HALFGI : 0);

		series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
			 (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
			 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
			 bf_isshpreamble(bf));

		series[i].ChSel = sc->sc_tx_chainmask;

		if (rtsctsena)
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
	}

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
				     ctsrate, ctsduration,
				     series, 4, flags);

	if (sc->sc_config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(ah, ds, 8192);
}
676
677/*
678 * Function to send a normal HT (non-AMPDU) frame
679 * NB: must be called with txq lock held
680 */
static int ath_tx_send_normal(struct ath_softc *sc,
			      struct ath_txq *txq,
			      struct ath_atx_tid *tid,
			      struct list_head *bf_head)
{
	struct ath_buf *bf;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);

	return 0;
}
704
705/* flush tid's software queue and send frames as non-ampdu's */
706
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	/* Another pause reference still holds the TID: nothing to flush. */
	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	/* Drain the software queue, sending each frame as a plain HT frame. */
	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		ASSERT(!bf_isretried(bf));
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_send_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}
733
734/* Completion routine of an aggregate */
735
736static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
737 struct ath_txq *txq,
738 struct ath_buf *bf,
739 struct list_head *bf_q,
740 int txok)
741{
Sujith528f0c62008-10-29 10:14:26 +0530742 struct ath_node *an = NULL;
743 struct sk_buff *skb;
744 struct ieee80211_tx_info *tx_info;
745 struct ath_atx_tid *tid = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700746 struct ath_buf *bf_last = bf->bf_lastbf;
747 struct ath_desc *ds = bf_last->bf_desc;
748 struct ath_buf *bf_next, *bf_lastq = NULL;
749 struct list_head bf_head, bf_pending;
750 u16 seq_st = 0;
751 u32 ba[WME_BA_BMP_SIZE >> 5];
752 int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700753
Sujith528f0c62008-10-29 10:14:26 +0530754 skb = (struct sk_buff *)bf->bf_mpdu;
755 tx_info = IEEE80211_SKB_CB(skb);
756
757 if (tx_info->control.sta) {
758 an = (struct ath_node *)tx_info->control.sta->drv_priv;
759 tid = ATH_AN_2_TID(an, bf->bf_tidno);
760 }
761
Sujithcd3d39a2008-08-11 14:03:34 +0530762 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700763 if (isaggr) {
764 if (txok) {
765 if (ATH_DS_TX_BA(ds)) {
766 /*
767 * extract starting sequence and
768 * block-ack bitmap
769 */
770 seq_st = ATH_DS_BA_SEQ(ds);
771 memcpy(ba,
772 ATH_DS_BA_BITMAP(ds),
773 WME_BA_BMP_SIZE >> 3);
774 } else {
Luis R. Rodriguez0345f372008-10-03 15:45:25 -0700775 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700776
777 /*
778 * AR5416 can become deaf/mute when BA
779 * issue happens. Chip needs to be reset.
780 * But AP code may have sychronization issues
781 * when perform internal reset in this routine.
782 * Only enable reset in STA mode for now.
783 */
Colin McCabed97809d2008-12-01 13:38:55 -0800784 if (sc->sc_ah->ah_opmode ==
785 NL80211_IFTYPE_STATION)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700786 needreset = 1;
787 }
788 } else {
Luis R. Rodriguez0345f372008-10-03 15:45:25 -0700789 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700790 }
791 }
792
793 INIT_LIST_HEAD(&bf_pending);
794 INIT_LIST_HEAD(&bf_head);
795
796 while (bf) {
797 txfail = txpending = 0;
798 bf_next = bf->bf_next;
799
800 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
801 /* transmit completion, subframe is
802 * acked by block ack */
803 } else if (!isaggr && txok) {
804 /* transmit completion */
805 } else {
806
Sujitha37c2c72008-10-29 10:15:40 +0530807 if (!(tid->state & AGGR_CLEANUP) &&
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700808 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
809 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
810 ath_tx_set_retry(sc, bf);
811 txpending = 1;
812 } else {
Sujithcd3d39a2008-08-11 14:03:34 +0530813 bf->bf_state.bf_type |= BUF_XRETRY;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700814 txfail = 1;
815 sendbar = 1;
816 }
817 } else {
818 /*
819 * cleanup in progress, just fail
820 * the un-acked sub-frames
821 */
822 txfail = 1;
823 }
824 }
825 /*
826 * Remove ath_buf's of this sub-frame from aggregate queue.
827 */
828 if (bf_next == NULL) { /* last subframe in the aggregate */
829 ASSERT(bf->bf_lastfrm == bf_last);
830
831 /*
832 * The last descriptor of the last sub frame could be
833 * a holding descriptor for h/w. If that's the case,
834 * bf->bf_lastfrm won't be in the bf_q.
835 * Make sure we handle bf_q properly here.
836 */
837
838 if (!list_empty(bf_q)) {
839 bf_lastq = list_entry(bf_q->prev,
840 struct ath_buf, list);
841 list_cut_position(&bf_head,
842 bf_q, &bf_lastq->list);
843 } else {
844 /*
845 * XXX: if the last subframe only has one
846 * descriptor which is also being used as
847 * a holding descriptor. Then the ath_buf
848 * is not in the bf_q at all.
849 */
850 INIT_LIST_HEAD(&bf_head);
851 }
852 } else {
853 ASSERT(!list_empty(bf_q));
854 list_cut_position(&bf_head,
855 bf_q, &bf->bf_lastfrm->list);
856 }
857
858 if (!txpending) {
859 /*
860 * complete the acked-ones/xretried ones; update
861 * block-ack window
862 */
863 spin_lock_bh(&txq->axq_lock);
864 ath_tx_update_baw(sc, tid, bf->bf_seqno);
865 spin_unlock_bh(&txq->axq_lock);
866
867 /* complete this sub-frame */
868 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
869 } else {
870 /*
871 * retry the un-acked ones
872 */
873 /*
874 * XXX: if the last descriptor is holding descriptor,
875 * in order to requeue the frame to software queue, we
876 * need to allocate a new descriptor and
877 * copy the content of holding descriptor to it.
878 */
879 if (bf->bf_next == NULL &&
880 bf_last->bf_status & ATH_BUFSTATUS_STALE) {
881 struct ath_buf *tbf;
882
883 /* allocate new descriptor */
Sujithb77f4832008-12-07 21:44:03 +0530884 spin_lock_bh(&sc->tx.txbuflock);
885 ASSERT(!list_empty((&sc->tx.txbuf)));
886 tbf = list_first_entry(&sc->tx.txbuf,
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700887 struct ath_buf, list);
888 list_del(&tbf->list);
Sujithb77f4832008-12-07 21:44:03 +0530889 spin_unlock_bh(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700890
891 ATH_TXBUF_RESET(tbf);
892
893 /* copy descriptor content */
894 tbf->bf_mpdu = bf_last->bf_mpdu;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700895 tbf->bf_buf_addr = bf_last->bf_buf_addr;
896 *(tbf->bf_desc) = *(bf_last->bf_desc);
897
898 /* link it to the frame */
899 if (bf_lastq) {
900 bf_lastq->bf_desc->ds_link =
901 tbf->bf_daddr;
902 bf->bf_lastfrm = tbf;
903 ath9k_hw_cleartxdesc(sc->sc_ah,
904 bf->bf_lastfrm->bf_desc);
905 } else {
906 tbf->bf_state = bf_last->bf_state;
907 tbf->bf_lastfrm = tbf;
908 ath9k_hw_cleartxdesc(sc->sc_ah,
909 tbf->bf_lastfrm->bf_desc);
910
911 /* copy the DMA context */
Sujithff9b6622008-08-14 13:27:16 +0530912 tbf->bf_dmacontext =
913 bf_last->bf_dmacontext;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700914 }
915 list_add_tail(&tbf->list, &bf_head);
916 } else {
917 /*
918 * Clear descriptor status words for
919 * software retry
920 */
921 ath9k_hw_cleartxdesc(sc->sc_ah,
Sujithff9b6622008-08-14 13:27:16 +0530922 bf->bf_lastfrm->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700923 }
924
925 /*
926 * Put this buffer to the temporary pending
927 * queue to retain ordering
928 */
929 list_splice_tail_init(&bf_head, &bf_pending);
930 }
931
932 bf = bf_next;
933 }
934
Sujitha37c2c72008-10-29 10:15:40 +0530935 if (tid->state & AGGR_CLEANUP) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700936 /* check to see if we're done with cleaning the h/w queue */
937 spin_lock_bh(&txq->axq_lock);
938
939 if (tid->baw_head == tid->baw_tail) {
Sujitha37c2c72008-10-29 10:15:40 +0530940 tid->state &= ~AGGR_ADDBA_COMPLETE;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700941 tid->addba_exchangeattempts = 0;
942 spin_unlock_bh(&txq->axq_lock);
943
Sujitha37c2c72008-10-29 10:15:40 +0530944 tid->state &= ~AGGR_CLEANUP;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700945
946 /* send buffered frames as singles */
947 ath_tx_flush_tid(sc, tid);
948 } else
949 spin_unlock_bh(&txq->axq_lock);
950
951 return;
952 }
953
954 /*
955 * prepend un-acked frames to the beginning of the pending frame queue
956 */
957 if (!list_empty(&bf_pending)) {
958 spin_lock_bh(&txq->axq_lock);
		/* Note: we _prepend_; we do _not_ add to
		 * the end of the queue! */
961 list_splice(&bf_pending, &tid->buf_q);
962 ath_tx_queue_tid(txq, tid);
963 spin_unlock_bh(&txq->axq_lock);
964 }
965
966 if (needreset)
Sujithf45144e2008-08-11 14:02:53 +0530967 ath_reset(sc, false);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700968
969 return;
970}
971
Sujithc4288392008-11-18 09:09:30 +0530972static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
973{
974 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
975 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
976 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
977
Vasanthakumar Thiagarajan7ac47012008-11-20 11:51:18 +0530978 tx_info_priv->update_rc = false;
Sujithc4288392008-11-18 09:09:30 +0530979 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
980 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
981
982 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
983 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
984 if (bf_isdata(bf)) {
985 memcpy(&tx_info_priv->tx, &ds->ds_txstat,
986 sizeof(tx_info_priv->tx));
987 tx_info_priv->n_frames = bf->bf_nframes;
988 tx_info_priv->n_bad_frames = nbad;
Vasanthakumar Thiagarajan7ac47012008-11-20 11:51:18 +0530989 tx_info_priv->update_rc = true;
Sujithc4288392008-11-18 09:09:30 +0530990 }
991 }
992}
993
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700994/* Process completed xmit descriptors from the specified queue */
995
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	int txok, nbad = 0;
	int status;

	DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			/* Queue fully reaped; clear the DMA link state */
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/* FIXME:
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to remove
				 * the last holding descriptor in BH context.
				 */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Lets work with the next buffer now */
				bf = list_entry(bf_held->list.next,
					struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;	/* NB: last descriptor */

		/* Ask the HAL whether the hardware is done with this unit */
		status = ath9k_hw_txprocdesc(ah, ds);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		if (bf->bf_desc == txq->axq_lastdsWithCTS)
			txq->axq_lastdsWithCTS = NULL;
		if (ds == txq->axq_gatingds)
			txq->axq_gatingds = NULL;

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_status |= ATH_BUFSTATUS_STALE;
		INIT_LIST_HEAD(&bf_head);

		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;

		if (bf_isaggr(bf))
			txq->axq_aggr_depth--;

		txok = (ds->ds_txstat.ts_status == 0);

		spin_unlock_bh(&txq->axq_lock);

		if (bf_held) {
			/* Return the previous holding buffer to the free pool */
			list_del(&bf_held->list);
			spin_lock_bh(&sc->tx.txbuflock);
			list_add_tail(&bf_held->list, &sc->tx.txbuf);
			spin_unlock_bh(&sc->tx.txbuflock);
		}

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			nbad = 0;
		} else {
			nbad = ath_tx_num_badfrms(sc, bf, txok);
		}

		/* Stash TX status for the rate-control module */
		ath_tx_rc_status(bf, ds, nbad);

		/*
		 * Complete this transmit unit
		 */
		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

		/* Wake up mac80211 queue */

		spin_lock_bh(&txq->axq_lock);
		if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
				(ATH_TXBUF - 20)) {
			int qnum;
			qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
			if (qnum != -1) {
				ieee80211_wake_queue(sc->hw, qnum);
				txq->stopped = 0;
			}

		}

		/*
		 * schedule any pending packets if aggregation is enabled
		 */
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
1131
/*
 * Stop TX DMA on a given hardware queue.
 *
 * The result of ath9k_hw_stoptxdma() is intentionally discarded; the
 * caller (ath_drain_txdataq) double-checks ath9k_hw_numtxpending()
 * afterwards to see whether DMA really stopped.
 */
static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	(void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
	DPRINTF(sc, ATH_DBG_XMIT, "tx queue [%u] %x, link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(ah, txq->axq_qnum),
		txq->axq_link);
}
1141
1142/* Drain only the data queues */
1143
static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int i, status, npend = 0;

	/* Ask the hardware to stop DMA on every configured data queue,
	 * then tally how many frames are still pending in hardware. */
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ath_tx_stopdma(sc, &sc->tx.txq[i]);
				/* The TxDMA may not really be stopped.
				 * Double check the hal tx pending count */
				npend += ath9k_hw_numtxpending(ah,
					sc->tx.txq[i].axq_qnum);
			}
		}
	}

	if (npend) {
		/* TxDMA not stopped, reset the hal */
		DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah,
			sc->sc_ah->ah_curchan,
			sc->tx_chan_width,
			sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			sc->sc_ht_extprotspacing, true, &status)) {

			DPRINTF(sc, ATH_DBG_FATAL,
				"Unable to reset hardware; hal status %u\n",
				status);
		}
		spin_unlock_bh(&sc->sc_resetlock);
	}

	/* Finally flush whatever is left on each configured queue */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}
1184
1185/* Add a sub-frame to block ack window */
1186
1187static void ath_tx_addto_baw(struct ath_softc *sc,
1188 struct ath_atx_tid *tid,
1189 struct ath_buf *bf)
1190{
1191 int index, cindex;
1192
Sujithcd3d39a2008-08-11 14:03:34 +05301193 if (bf_isretried(bf))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001194 return;
1195
1196 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
1197 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
1198
1199 ASSERT(tid->tx_buf[cindex] == NULL);
1200 tid->tx_buf[cindex] = bf;
1201
1202 if (index >= ((tid->baw_tail - tid->baw_head) &
1203 (ATH_TID_MAX_BUFS - 1))) {
1204 tid->baw_tail = cindex;
1205 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
1206 }
1207}
1208
1209/*
1210 * Function to send an A-MPDU
1211 * NB: must be called with txq lock held
1212 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001213static int ath_tx_send_ampdu(struct ath_softc *sc,
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001214 struct ath_atx_tid *tid,
1215 struct list_head *bf_head,
1216 struct ath_tx_control *txctl)
1217{
1218 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001219
1220 BUG_ON(list_empty(bf_head));
1221
1222 bf = list_first_entry(bf_head, struct ath_buf, list);
Sujithcd3d39a2008-08-11 14:03:34 +05301223 bf->bf_state.bf_type |= BUF_AMPDU;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001224
1225 /*
1226 * Do not queue to h/w when any of the following conditions is true:
1227 * - there are pending frames in software queue
1228 * - the TID is currently paused for ADDBA/BAR request
1229 * - seqno is not within block-ack window
1230 * - h/w queue depth exceeds low water mark
1231 */
1232 if (!list_empty(&tid->buf_q) || tid->paused ||
1233 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
Sujith528f0c62008-10-29 10:14:26 +05301234 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001235 /*
1236 * Add this frame to software queue for scheduling later
1237 * for aggregation.
1238 */
1239 list_splice_tail_init(bf_head, &tid->buf_q);
Sujith528f0c62008-10-29 10:14:26 +05301240 ath_tx_queue_tid(txctl->txq, tid);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001241 return 0;
1242 }
1243
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001244 /* Add sub-frame to BAW */
1245 ath_tx_addto_baw(sc, tid, bf);
1246
1247 /* Queue to h/w without aggregation */
1248 bf->bf_nframes = 1;
1249 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
1250 ath_buf_set_rate(sc, bf);
Sujith528f0c62008-10-29 10:14:26 +05301251 ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
Sujith102e0572008-10-29 10:15:16 +05301252
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001253 return 0;
1254}
1255
1256/*
1257 * looks up the rate
1258 * returns aggr limit based on lowest of the rates
1259 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001260static u32 ath_lookup_rate(struct ath_softc *sc,
Johannes Bergae5eb022008-10-14 16:58:37 +02001261 struct ath_buf *bf,
1262 struct ath_atx_tid *tid)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001263{
Sujith3706de62008-12-07 21:42:10 +05301264 struct ath_rate_table *rate_table = sc->cur_rate_table;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001265 struct sk_buff *skb;
1266 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301267 struct ieee80211_tx_rate *rates;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001268 struct ath_tx_info_priv *tx_info_priv;
1269 u32 max_4ms_framelen, frame_length;
1270 u16 aggr_limit, legacy = 0, maxampdu;
1271 int i;
1272
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001273 skb = (struct sk_buff *)bf->bf_mpdu;
1274 tx_info = IEEE80211_SKB_CB(skb);
Sujitha8efee42008-11-18 09:07:30 +05301275 rates = tx_info->control.rates;
1276 tx_info_priv =
1277 (struct ath_tx_info_priv *)tx_info->rate_driver_data[0];
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001278
1279 /*
1280 * Find the lowest frame length among the rate series that will have a
1281 * 4ms transmit duration.
1282 * TODO - TXOP limit needs to be considered.
1283 */
1284 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
1285
1286 for (i = 0; i < 4; i++) {
Sujitha8efee42008-11-18 09:07:30 +05301287 if (rates[i].count) {
Sujithe63835b2008-11-18 09:07:53 +05301288 if (!WLAN_RC_PHY_HT(rate_table->info[rates[i].idx].phy)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001289 legacy = 1;
1290 break;
1291 }
1292
Sujitha8efee42008-11-18 09:07:30 +05301293 frame_length =
1294 rate_table->info[rates[i].idx].max_4ms_framelen;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001295 max_4ms_framelen = min(max_4ms_framelen, frame_length);
1296 }
1297 }
1298
1299 /*
1300 * limit aggregate size by the minimum rate if rate selected is
1301 * not a probe rate, if rate selected is a probe rate then
1302 * avoid aggregation of this packet.
1303 */
1304 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
1305 return 0;
1306
1307 aggr_limit = min(max_4ms_framelen,
1308 (u32)ATH_AMPDU_LIMIT_DEFAULT);
1309
1310 /*
1311 * h/w can accept aggregates upto 16 bit lengths (65535).
1312 * The IE, however can hold upto 65536, which shows up here
1313 * as zero. Ignore 65536 since we are constrained by hw.
1314 */
Johannes Bergae5eb022008-10-14 16:58:37 +02001315 maxampdu = tid->an->maxampdu;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001316 if (maxampdu)
1317 aggr_limit = min(aggr_limit, maxampdu);
1318
1319 return aggr_limit;
1320}
1321
/*
 * Returns the number of delimiters to be added to meet the minimum
 * required MPDU density.
 * The caller must make sure that the rate is an HT rate.
 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001327static int ath_compute_num_delims(struct ath_softc *sc,
Johannes Bergae5eb022008-10-14 16:58:37 +02001328 struct ath_atx_tid *tid,
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001329 struct ath_buf *bf,
1330 u16 frmlen)
1331{
Sujith3706de62008-12-07 21:42:10 +05301332 struct ath_rate_table *rt = sc->cur_rate_table;
Sujitha8efee42008-11-18 09:07:30 +05301333 struct sk_buff *skb = bf->bf_mpdu;
1334 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001335 u32 nsymbits, nsymbols, mpdudensity;
1336 u16 minlen;
1337 u8 rc, flags, rix;
1338 int width, half_gi, ndelim, mindelim;
1339
1340 /* Select standard number of delimiters based on frame length alone */
1341 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
1342
1343 /*
1344 * If encryption enabled, hardware requires some more padding between
1345 * subframes.
1346 * TODO - this could be improved to be dependent on the rate.
1347 * The hardware can keep up at lower rates, but not higher rates
1348 */
1349 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
1350 ndelim += ATH_AGGR_ENCRYPTDELIM;
1351
1352 /*
1353 * Convert desired mpdu density from microeconds to bytes based
1354 * on highest rate in rate series (i.e. first rate) to determine
1355 * required minimum length for subframe. Take into account
1356 * whether high rate is 20 or 40Mhz and half or full GI.
1357 */
Johannes Bergae5eb022008-10-14 16:58:37 +02001358 mpdudensity = tid->an->mpdudensity;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001359
1360 /*
1361 * If there is no mpdu density restriction, no further calculation
1362 * is needed.
1363 */
1364 if (mpdudensity == 0)
1365 return ndelim;
1366
Sujitha8efee42008-11-18 09:07:30 +05301367 rix = tx_info->control.rates[0].idx;
1368 flags = tx_info->control.rates[0].flags;
Sujithe63835b2008-11-18 09:07:53 +05301369 rc = rt->info[rix].ratecode;
Sujitha8efee42008-11-18 09:07:30 +05301370 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
1371 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001372
1373 if (half_gi)
1374 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
1375 else
1376 nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
1377
1378 if (nsymbols == 0)
1379 nsymbols = 1;
1380
1381 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1382 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
1383
1384 /* Is frame shorter than required minimum length? */
1385 if (frmlen < minlen) {
1386 /* Get the minimum number of delimiters required. */
1387 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
1388 ndelim = max(mindelim, ndelim);
1389 }
1390
1391 return ndelim;
1392}
1393
1394/*
1395 * For aggregation from software buffer queue.
1396 * NB: must be called with txq lock held
1397 */
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     struct ath_buf **bf_last,
					     struct aggr_rifs_param *param,
					     int *prev_frames)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
	struct list_head bf_head;
	int rl = 0, nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	int prev_al = 0;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(list_empty(&tid->buf_q));

	/* First sub-frame anchors the aggregate's totals (bf_al/bf_nframes) */
	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/*
		 * do not step over block-ack window
		 */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		/* Rate lookup happens once, on the first eligible frame */
		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/*
		 * do not exceed aggregation limit
		 */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes && (aggr_limit <
			(al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * do not exceed subframe limit
		 */
		if ((nframes + *prev_frames) >=
		    min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * add padding for previous frame to aggregation length
		 */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);

		/* Padding owed by this frame; charged to the next one */
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		bf->bf_lastfrm->bf_desc->ds_link = 0;

		/*
		 * this packet is part of an aggregate
		 * - remove all descriptors belonging to this frame from
		 *   software queue
		 * - add it to block ack window
		 * - set up descriptors for aggregation
		 */
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_addto_baw(sc, tid, bf);

		list_for_each_entry(tbf, &bf_head, list) {
			ath9k_hw_set11n_aggr_middle(sc->sc_ah,
				tbf->bf_desc, ndelim);
		}

		/*
		 * link buffers of this frame to the aggregate
		 */
		list_splice_tail_init(&bf_head, bf_q);
		nframes++;

		/* Chain the previous sub-frame's descriptor to this one */
		if (bf_prev) {
			bf_prev->bf_next = bf;
			bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
		}
		bf_prev = bf;

#ifdef AGGR_NOSHORT
		/*
		 * terminate aggregation on a small packet boundary
		 */
		if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
			status = ATH_AGGR_SHORTPKT;
			break;
		}
#endif
	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;
	*bf_last = bf_prev;
	return status;
#undef PADBYTES
}
1515
1516/*
1517 * process pending frames possibly doing a-mpdu aggregation
1518 * NB: must be called with txq lock held
1519 */
static void ath_tx_sched_aggr(struct ath_softc *sc,
	struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;
	struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
	int prev_frames = 0;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		/* Pull as many sub-frames as limits allow into bf_q */
		status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
					  &prev_frames);

		/*
		 * no frames picked up to be aggregated; block-ack
		 * window is not open
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf_last = list_entry(bf_q.prev, struct ath_buf, list);
		bf->bf_lastbf = bf_last;

		/*
		 * if only one frame, send as non-aggregate
		 */
		if (bf->bf_nframes == 1) {
			ASSERT(bf->bf_lastfrm == bf_last);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			/*
			 * clear aggr bits for every descriptor
			 * XXX TODO: is there a way to optimize it?
			 */
			list_for_each_entry(tbf, &bf_q, list) {
				ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
			}

			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/*
		 * setup first desc with rate and aggr info
		 */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/*
		 * anchor last frame of aggregate correctly
		 */
		ASSERT(bf_lastaggr);
		ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
		tbf = bf_lastaggr;
		ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);

		/* XXX: We don't enter into this loop, consider removing this */
		while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
			tbf = list_entry(tbf->list.next, struct ath_buf, list);
			ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
		}

		txq->axq_aggr_depth++;

		/*
		 * Normal aggregate, queue to hardware
		 */
		ath_tx_txqaddbuf(sc, txq, &bf_q);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
1600
1601/* Called with txq lock held */
1602
static void ath_tid_drain(struct ath_softc *sc,
			  struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	/* Fail every frame still pending on this TID's software queue */
	for (;;) {
		if (list_empty(&tid->buf_q))
			break;
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);

		/* update baw for software retried frame */
		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		/*
		 * do not indicate packets while holding txq spinlock.
		 * unlock is intentional here
		 */
		spin_unlock(&txq->axq_lock);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);

		spin_lock(&txq->axq_lock);
	}

	/*
	 * TODO: For frame(s) that are in the retry state, we will reuse the
	 * sequence number(s) without setting the retry bit. The
	 * alternative is to give up on these and BAR the receiver's window
	 * forward.
	 */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}
1644
1645/*
1646 * Drain all pending buffers
1647 * NB: must be called with txq lock held
1648 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001649static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
Sujithb5aa9bf2008-10-29 10:13:31 +05301650 struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001651{
1652 struct ath_atx_ac *ac, *ac_tmp;
1653 struct ath_atx_tid *tid, *tid_tmp;
1654
1655 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1656 list_del(&ac->list);
1657 ac->sched = false;
1658 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1659 list_del(&tid->list);
1660 tid->sched = false;
Sujithb5aa9bf2008-10-29 10:13:31 +05301661 ath_tid_drain(sc, txq, tid);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001662 }
1663 }
1664}
1665
Luis R. Rodriguezf8316df2008-12-03 03:35:29 -08001666static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
Sujith8f93b8b2008-11-18 09:10:42 +05301667 struct sk_buff *skb,
Sujith528f0c62008-10-29 10:14:26 +05301668 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001669{
Sujith528f0c62008-10-29 10:14:26 +05301670 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1671 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001672 struct ath_tx_info_priv *tx_info_priv;
Sujith528f0c62008-10-29 10:14:26 +05301673 int hdrlen;
1674 __le16 fc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001675
Luis R. Rodriguezc112d0c2008-12-03 03:35:30 -08001676 tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
1677 if (unlikely(!tx_info_priv))
1678 return -ENOMEM;
Sujitha8efee42008-11-18 09:07:30 +05301679 tx_info->rate_driver_data[0] = tx_info_priv;
Sujith528f0c62008-10-29 10:14:26 +05301680 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1681 fc = hdr->frame_control;
Jouni Malinene022edb2008-08-22 17:31:33 +03001682
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001683 ATH_TXBUF_RESET(bf);
Sujith528f0c62008-10-29 10:14:26 +05301684
1685 /* Frame type */
1686
1687 bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);
Sujithcd3d39a2008-08-11 14:03:34 +05301688
1689 ieee80211_is_data(fc) ?
1690 (bf->bf_state.bf_type |= BUF_DATA) :
1691 (bf->bf_state.bf_type &= ~BUF_DATA);
1692 ieee80211_is_back_req(fc) ?
1693 (bf->bf_state.bf_type |= BUF_BAR) :
1694 (bf->bf_state.bf_type &= ~BUF_BAR);
1695 ieee80211_is_pspoll(fc) ?
1696 (bf->bf_state.bf_type |= BUF_PSPOLL) :
1697 (bf->bf_state.bf_type &= ~BUF_PSPOLL);
Sujith672840a2008-08-11 14:05:08 +05301698 (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
Sujithcd3d39a2008-08-11 14:03:34 +05301699 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
1700 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
Sujitha8efee42008-11-18 09:07:30 +05301701 (sc->hw->conf.ht.enabled && !is_pae(skb) &&
Sujith528f0c62008-10-29 10:14:26 +05301702 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
1703 (bf->bf_state.bf_type |= BUF_HT) :
1704 (bf->bf_state.bf_type &= ~BUF_HT);
Sujithcd3d39a2008-08-11 14:03:34 +05301705
Sujith528f0c62008-10-29 10:14:26 +05301706 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
1707
1708 /* Crypto */
1709
1710 bf->bf_keytype = get_hw_crypto_keytype(skb);
1711
1712 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1713 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1714 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1715 } else {
1716 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1717 }
1718
Sujith528f0c62008-10-29 10:14:26 +05301719 /* Assign seqno, tidno */
1720
1721 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
1722 assign_aggr_tid_seqno(skb, bf);
1723
1724 /* DMA setup */
1725
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001726 bf->bf_mpdu = skb;
Luis R. Rodriguezf8316df2008-12-03 03:35:29 -08001727
Sujith528f0c62008-10-29 10:14:26 +05301728 bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
1729 skb->len, PCI_DMA_TODEVICE);
Luis R. Rodriguezf8316df2008-12-03 03:35:29 -08001730 if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_dmacontext))) {
1731 bf->bf_mpdu = NULL;
1732 DPRINTF(sc, ATH_DBG_CONFIG,
1733 "pci_dma_mapping_error() on TX\n");
1734 return -ENOMEM;
1735 }
1736
Sujith528f0c62008-10-29 10:14:26 +05301737 bf->bf_buf_addr = bf->bf_dmacontext;
Luis R. Rodriguezf8316df2008-12-03 03:35:29 -08001738 return 0;
Sujith528f0c62008-10-29 10:14:26 +05301739}
1740
/* FIXME: tx power */
/*
 * Program the tx descriptor for a prepared ath_buf and hand it to the
 * hardware queue, either through the per-TID aggregation path or as a
 * single regular frame. Assumes ath_tx_setup_buffer() has already run
 * on @bf (frame length, keys, flags and DMA address are valid).
 */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hal *ah = sc->sc_ah;
	int frm_type;

	frm_type = get_hw_packet_type(skb);

	/* The queueing helpers below operate on a list; wrap the single
	 * buffer in a one-element list. */
	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	/* setup descriptor */

	ds = bf->bf_desc;
	ds->ds_link = 0;
	ds->ds_data = bf->bf_buf_addr;

	/* Formulate first tx descriptor with tx controls */

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds);	/* first descriptor */

	bf->bf_lastfrm = bf;

	/* axq_lock serializes queue manipulation against the tx tasklet */
	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (ath_aggr_query(sc, an, bf->bf_tidno)) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_normal(sc, txctl->txq,
					   tid, &bf_head);
		}
	} else {
		/* Non-aggregate: a self-contained single-frame burst */
		bf->bf_lastbf = bf;
		bf->bf_nframes = 1;

		ath_buf_set_rate(sc, bf);
		ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}
1809
/*
 * Driver entry point for transmitting one frame.
 *
 * Grabs a free tx buffer, initializes it for @skb and kicks off the DMA.
 * Returns 0 on success or a negative value on failure; upon failure the
 * caller should free @skb.
 */
int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_buf *bf;
	int r;

	/* Check if a tx buffer is available */

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(sc, bf, skb, txctl);
	if (unlikely(r)) {
		struct ath_txq *txq = txctl->txq;

		DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* upon ath_tx_processq() this TX queue will be resumed, we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (ath_txq_depth(sc, txq->axq_qnum) > 1) {
			ieee80211_stop_queue(sc->hw,
					     skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		/* return the unused buffer to the free pool */
		spin_lock_bh(&sc->tx.txbuflock);
		list_add_tail(&bf->list, &sc->tx.txbuf);
		spin_unlock_bh(&sc->tx.txbuflock);

		return r;
	}

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
1854
1855/* Initialize TX queue and h/w */
1856
1857int ath_tx_init(struct ath_softc *sc, int nbufs)
1858{
1859 int error = 0;
1860
1861 do {
Sujithb77f4832008-12-07 21:44:03 +05301862 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001863
1864 /* Setup tx descriptors */
Sujithb77f4832008-12-07 21:44:03 +05301865 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Sujith556bb8f2008-08-11 14:03:53 +05301866 "tx", nbufs, 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001867 if (error != 0) {
1868 DPRINTF(sc, ATH_DBG_FATAL,
Sujith04bd46382008-11-28 22:18:05 +05301869 "Failed to allocate tx descriptors: %d\n",
1870 error);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001871 break;
1872 }
1873
1874 /* XXX allocate beacon state together with vap */
Sujithb77f4832008-12-07 21:44:03 +05301875 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001876 "beacon", ATH_BCBUF, 1);
1877 if (error != 0) {
1878 DPRINTF(sc, ATH_DBG_FATAL,
Sujith04bd46382008-11-28 22:18:05 +05301879 "Failed to allocate beacon descriptors: %d\n",
1880 error);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001881 break;
1882 }
1883
1884 } while (0);
1885
1886 if (error != 0)
1887 ath_tx_cleanup(sc);
1888
1889 return error;
1890}
1891
1892/* Reclaim all tx queue resources */
1893
1894int ath_tx_cleanup(struct ath_softc *sc)
1895{
1896 /* cleanup beacon descriptors */
Sujithb77f4832008-12-07 21:44:03 +05301897 if (sc->beacon.bdma.dd_desc_len != 0)
1898 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001899
1900 /* cleanup tx descriptors */
Sujithb77f4832008-12-07 21:44:03 +05301901 if (sc->tx.txdma.dd_desc_len != 0)
1902 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001903
1904 return 0;
1905}
1906
1907/* Setup a h/w transmit queue */
1908
/*
 * Allocate and initialize one hardware transmit queue of the given
 * type/subtype. Returns a pointer into sc->tx.txq[] on success or NULL
 * when the hal has no queue available (normal on parts with few queues)
 * or the hal-assigned number is out of range for our bookkeeping.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	/* let the hal pick its per-queue defaults for the EDCA params */
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (qtype == ATH9K_TX_QUEUE_UAPSD)
		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
	else
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
			TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		/* hal handed us a queue we can't track; give it back */
		DPRINTF(sc, ATH_DBG_FATAL,
			"qnum %u out of range, max %u!\n",
			qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		/* first time this qnum is handed out: init s/w state */
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_aggr_depth = 0;
		txq->axq_totalqueued = 0;
		txq->axq_linkbuf = NULL;
		sc->tx.txqsetup |= 1<<qnum;
	}
	return &sc->tx.txq[qnum];
}
1973
1974/* Reclaim resources for a setup queue */
1975
1976void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1977{
1978 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
Sujithb77f4832008-12-07 21:44:03 +05301979 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001980}
1981
1982/*
1983 * Setup a hardware data transmit queue for the specified
1984 * access control. The hal may not support all requested
1985 * queues in which case it will return a reference to a
1986 * previously setup queue. We record the mapping from ac's
1987 * to h/w queues for use by ath_tx_start and also track
1988 * the set of h/w queues being used to optimize work in the
1989 * transmit interrupt handler and related routines.
1990 */
1991
1992int ath_tx_setup(struct ath_softc *sc, int haltype)
1993{
1994 struct ath_txq *txq;
1995
Sujithb77f4832008-12-07 21:44:03 +05301996 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001997 DPRINTF(sc, ATH_DBG_FATAL,
Sujith04bd46382008-11-28 22:18:05 +05301998 "HAL AC %u out of range, max %zu!\n",
Sujithb77f4832008-12-07 21:44:03 +05301999 haltype, ARRAY_SIZE(sc->tx.hwq_map));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002000 return 0;
2001 }
2002 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
2003 if (txq != NULL) {
Sujithb77f4832008-12-07 21:44:03 +05302004 sc->tx.hwq_map[haltype] = txq->axq_qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002005 return 1;
2006 } else
2007 return 0;
2008}
2009
2010int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
2011{
2012 int qnum;
2013
2014 switch (qtype) {
2015 case ATH9K_TX_QUEUE_DATA:
Sujithb77f4832008-12-07 21:44:03 +05302016 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002017 DPRINTF(sc, ATH_DBG_FATAL,
Sujith04bd46382008-11-28 22:18:05 +05302018 "HAL AC %u out of range, max %zu!\n",
Sujithb77f4832008-12-07 21:44:03 +05302019 haltype, ARRAY_SIZE(sc->tx.hwq_map));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002020 return -1;
2021 }
Sujithb77f4832008-12-07 21:44:03 +05302022 qnum = sc->tx.hwq_map[haltype];
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002023 break;
2024 case ATH9K_TX_QUEUE_BEACON:
Sujithb77f4832008-12-07 21:44:03 +05302025 qnum = sc->beacon.beaconq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002026 break;
2027 case ATH9K_TX_QUEUE_CAB:
Sujithb77f4832008-12-07 21:44:03 +05302028 qnum = sc->beacon.cabq->axq_qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002029 break;
2030 default:
2031 qnum = -1;
2032 }
2033 return qnum;
2034}
2035
Sujith528f0c62008-10-29 10:14:26 +05302036/* Get a transmit queue, if available */
2037
/*
 * Map @skb's mac80211 queue to our h/w tx queue and check that it has
 * headroom. If the queue is nearly out of descriptors the mac80211
 * queue is stopped and NULL is returned; otherwise the queue pointer.
 */
struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_txq *txq = NULL;
	int qnum;

	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
	txq = &sc->tx.txq[qnum];

	spin_lock_bh(&txq->axq_lock);

	/* Try to avoid running out of descriptors */
	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
		/* 20-buffer margin leaves room for frames already in flight */
		DPRINTF(sc, ATH_DBG_FATAL,
			"TX queue: %d is full, depth: %d\n",
			qnum, txq->axq_depth);
		ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
		txq->stopped = 1;
		spin_unlock_bh(&txq->axq_lock);
		return NULL;
	}

	spin_unlock_bh(&txq->axq_lock);

	return txq;
}
2063
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002064/* Update parameters for a transmit queue */
2065
Sujithea9880f2008-08-07 10:53:10 +05302066int ath_txq_update(struct ath_softc *sc, int qnum,
2067 struct ath9k_tx_queue_info *qinfo)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002068{
2069 struct ath_hal *ah = sc->sc_ah;
2070 int error = 0;
Sujithea9880f2008-08-07 10:53:10 +05302071 struct ath9k_tx_queue_info qi;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002072
Sujithb77f4832008-12-07 21:44:03 +05302073 if (qnum == sc->beacon.beaconq) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002074 /*
2075 * XXX: for beacon queue, we just save the parameter.
2076 * It will be picked up by ath_beaconq_config when
2077 * it's necessary.
2078 */
Sujithb77f4832008-12-07 21:44:03 +05302079 sc->beacon.beacon_qi = *qinfo;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002080 return 0;
2081 }
2082
Sujithb77f4832008-12-07 21:44:03 +05302083 ASSERT(sc->tx.txq[qnum].axq_qnum == qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002084
Sujithea9880f2008-08-07 10:53:10 +05302085 ath9k_hw_get_txq_props(ah, qnum, &qi);
2086 qi.tqi_aifs = qinfo->tqi_aifs;
2087 qi.tqi_cwmin = qinfo->tqi_cwmin;
2088 qi.tqi_cwmax = qinfo->tqi_cwmax;
2089 qi.tqi_burstTime = qinfo->tqi_burstTime;
2090 qi.tqi_readyTime = qinfo->tqi_readyTime;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002091
Sujithea9880f2008-08-07 10:53:10 +05302092 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002093 DPRINTF(sc, ATH_DBG_FATAL,
Sujith04bd46382008-11-28 22:18:05 +05302094 "Unable to update hardware queue %u!\n", qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002095 error = -EIO;
2096 } else {
2097 ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
2098 }
2099
2100 return error;
2101}
2102
2103int ath_cabq_update(struct ath_softc *sc)
2104{
Sujithea9880f2008-08-07 10:53:10 +05302105 struct ath9k_tx_queue_info qi;
Sujithb77f4832008-12-07 21:44:03 +05302106 int qnum = sc->beacon.cabq->axq_qnum;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002107 struct ath_beacon_config conf;
2108
Sujithea9880f2008-08-07 10:53:10 +05302109 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002110 /*
2111 * Ensure the readytime % is within the bounds.
2112 */
2113 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
2114 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
2115 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
2116 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
2117
2118 ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
2119 qi.tqi_readyTime =
2120 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
2121 ath_txq_update(sc, qnum, &qi);
2122
2123 return 0;
2124}
2125
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002126/* Deferred processing of transmit interrupt */
2127
2128void ath_tx_tasklet(struct ath_softc *sc)
2129{
Sujith1fe11322008-08-26 08:11:06 +05302130 int i;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002131 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2132
2133 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2134
2135 /*
2136 * Process each active queue.
2137 */
2138 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2139 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
Sujithb77f4832008-12-07 21:44:03 +05302140 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002141 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002142}
2143
/*
 * Remove every frame from @txq and complete it. When @retry_tx is false
 * the frames are marked as software-aborted so completion does not
 * retry them. Pending (not yet queued to h/w) aggregation buffers are
 * flushed as well when aggregation is enabled.
 */
void ath_tx_draintxq(struct ath_softc *sc,
		     struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;

	INIT_LIST_HEAD(&bf_head);

	/*
	 * NB: this assumes output has been stopped and
	 * we do not need to block ath_tx_tasklet
	 */
	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/* Stale buffers were already completed; just recycle them. */
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			list_del(&bf->list);
			spin_unlock_bh(&txq->axq_lock);

			spin_lock_bh(&sc->tx.txbuflock);
			list_add_tail(&bf->list, &sc->tx.txbuf);
			spin_unlock_bh(&sc->tx.txbuflock);
			continue;
		}

		lastbf = bf->bf_lastbf;
		/* Mark as aborted so completion won't attempt a retry. */
		if (!retry_tx)
			lastbf->bf_desc->ds_txstat.ts_flags =
				ATH9K_TX_SW_ABORTED;

		/* remove ath_buf's of the same mpdu from txq */
		list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}
2204
2205/* Drain the transmit queues and reclaim resources */
2206
2207void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2208{
2209 /* stop beacon queue. The beacon will be freed when
2210 * we go to INIT state */
Sujith672840a2008-08-11 14:05:08 +05302211 if (!(sc->sc_flags & SC_OP_INVALID)) {
Sujithb77f4832008-12-07 21:44:03 +05302212 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
Sujith04bd46382008-11-28 22:18:05 +05302213 DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n",
Sujithb77f4832008-12-07 21:44:03 +05302214 ath9k_hw_gettxbuf(sc->sc_ah, sc->beacon.beaconq));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002215 }
2216
2217 ath_drain_txdataq(sc, retry_tx);
2218}
2219
2220u32 ath_txq_depth(struct ath_softc *sc, int qnum)
2221{
Sujithb77f4832008-12-07 21:44:03 +05302222 return sc->tx.txq[qnum].axq_depth;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002223}
2224
2225u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
2226{
Sujithb77f4832008-12-07 21:44:03 +05302227 return sc->tx.txq[qnum].axq_aggr_depth;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002228}
2229
Sujithccc75c52008-10-29 10:18:14 +05302230bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002231{
2232 struct ath_atx_tid *txtid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002233
Sujith672840a2008-08-11 14:05:08 +05302234 if (!(sc->sc_flags & SC_OP_TXAGGR))
Sujithccc75c52008-10-29 10:18:14 +05302235 return false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002236
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002237 txtid = ATH_AN_2_TID(an, tidno);
2238
Sujitha37c2c72008-10-29 10:15:40 +05302239 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
2240 if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002241 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
2242 txtid->addba_exchangeattempts++;
Sujithccc75c52008-10-29 10:18:14 +05302243 return true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002244 }
2245 }
2246
Sujithccc75c52008-10-29 10:18:14 +05302247 return false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002248}
2249
2250/* Start TX aggregation */
2251
Sujithb5aa9bf2008-10-29 10:13:31 +05302252int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
2253 u16 tid, u16 *ssn)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002254{
2255 struct ath_atx_tid *txtid;
2256 struct ath_node *an;
2257
Sujithb5aa9bf2008-10-29 10:13:31 +05302258 an = (struct ath_node *)sta->drv_priv;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002259
Sujith672840a2008-08-11 14:05:08 +05302260 if (sc->sc_flags & SC_OP_TXAGGR) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002261 txtid = ATH_AN_2_TID(an, tid);
Sujitha37c2c72008-10-29 10:15:40 +05302262 txtid->state |= AGGR_ADDBA_PROGRESS;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002263 ath_tx_pause_tid(sc, txtid);
2264 }
2265
2266 return 0;
2267}
2268
2269/* Stop tx aggregation */
2270
Sujithb5aa9bf2008-10-29 10:13:31 +05302271int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002272{
Sujithb5aa9bf2008-10-29 10:13:31 +05302273 struct ath_node *an = (struct ath_node *)sta->drv_priv;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002274
2275 ath_tx_aggr_teardown(sc, an, tid);
2276 return 0;
2277}
2278
Sujith8469cde2008-10-29 10:19:28 +05302279/* Resume tx aggregation */
2280
2281void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
2282{
2283 struct ath_atx_tid *txtid;
2284 struct ath_node *an;
2285
2286 an = (struct ath_node *)sta->drv_priv;
2287
2288 if (sc->sc_flags & SC_OP_TXAGGR) {
2289 txtid = ATH_AN_2_TID(an, tid);
2290 txtid->baw_size =
2291 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
2292 txtid->state |= AGGR_ADDBA_COMPLETE;
2293 txtid->state &= ~AGGR_ADDBA_PROGRESS;
2294 ath_tx_resume_tid(sc, txtid);
2295 }
2296}
2297
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002298/*
2299 * Performs transmit side cleanup when TID changes from aggregated to
2300 * unaggregated.
2301 * - Pause the TID and mark cleanup in progress
2302 * - Discard all retry frames from the s/w queue.
2303 */
2304
/*
 * Tear down tx-side aggregation for one TID (see the comment block
 * above): pause the TID, drop already-retried frames from its software
 * queue, and either defer final cleanup until the block-ack window
 * empties or finish it immediately.
 */
void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
{
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		/* never aggregated: just reset the attempt counter */
		txtid->addba_exchangeattempts = 0;
		return;
	}

	/* TID must be paused first */
	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: it's based on the assumption that
			 * software retried frame will always stay
			 * at the head of software queue.
			 */
			break;
		}
		list_cut_position(&bf_head,
				  &txtid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	if (txtid->baw_head != txtid->baw_tail) {
		/* frames still outstanding in the BAW: finish cleanup when
		 * their completions drain the window */
		spin_unlock_bh(&txq->axq_lock);
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		txtid->addba_exchangeattempts = 0;
		spin_unlock_bh(&txq->axq_lock);
		ath_tx_flush_tid(sc, txtid);
	}
}
2354
2355/*
2356 * Tx scheduling logic
2357 * NB: must be called with txq lock held
2358 */
2359
/*
 * Round-robin tx scheduler: pick the first AC queued on @txq, service
 * one of its TIDs, and requeue AC/TID as needed so pending traffic
 * keeps rotating fairly. NB: must be called with txq lock held.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	/* nothing to schedule */
	if (list_empty(&txq->axq_acq))
		return;
	/*
	 * get the first node/ac pair on the queue
	 */
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	/*
	 * process a single tid per destination
	 */
	do {
		/* nothing to schedule */
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused) /* check next tid to keep h/w busy */
			continue;

		/* only feed aggregates at even queue depth to limit how
		 * much is outstanding at once */
		if ((txq->axq_depth % 2) == 0)
			ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		/* only schedule one TID at a time */
		break;
	} while (!list_empty(&ac->tid_q));

	/*
	 * schedule AC if more TIDs need processing
	 */
	if (!list_empty(&ac->tid_q)) {
		/*
		 * add dest ac to txq if not already added
		 */
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}
2417
2418/* Initialize per-node transmit state */
2419
/*
 * Initialize the per-node transmit state: the per-TID aggregation
 * bookkeeping (sequence numbers, block-ack window, ADDBA state) and
 * the per-AC scheduling state including the AC-to-h/w-queue mapping.
 */
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	/*
	 * Init per tid tx state
	 */
	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);

		/* each TID is serviced by its access category's queue */
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];

		/* ADDBA state */
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
		tid->addba_exchangeattempts = 0;
	}

	/*
	 * Init per ac tx state
	 */
	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		INIT_LIST_HEAD(&ac->tid_q);

		/* resolve this AC to the h/w queue set up for it */
		switch (acno) {
		case WME_AC_BE:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}
2479
2480/* Cleanupthe pending buffers for the node. */
2481
/*
 * Drop this node's scheduling state from every configured tx queue:
 * unschedule its ACs and TIDs, drain each TID's pending frames and
 * reset its aggregation state.
 */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];

			spin_lock(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				/* only touch ACs belonging to this node */
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->addba_exchangeattempts = 0;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			spin_unlock(&txq->axq_lock);
		}
	}
}
2518
Jouni Malinene022edb2008-08-22 17:31:33 +03002519void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2520{
2521 int hdrlen, padsize;
2522 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2523 struct ath_tx_control txctl;
2524
Sujith528f0c62008-10-29 10:14:26 +05302525 memset(&txctl, 0, sizeof(struct ath_tx_control));
2526
Jouni Malinene022edb2008-08-22 17:31:33 +03002527 /*
2528 * As a temporary workaround, assign seq# here; this will likely need
2529 * to be cleaned up to work better with Beacon transmission and virtual
2530 * BSSes.
2531 */
2532 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2533 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2534 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
Sujithb77f4832008-12-07 21:44:03 +05302535 sc->tx.seq_no += 0x10;
Jouni Malinene022edb2008-08-22 17:31:33 +03002536 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
Sujithb77f4832008-12-07 21:44:03 +05302537 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
Jouni Malinene022edb2008-08-22 17:31:33 +03002538 }
2539
2540 /* Add the padding after the header if this is not already done */
2541 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2542 if (hdrlen & 3) {
2543 padsize = hdrlen % 4;
2544 if (skb_headroom(skb) < padsize) {
Sujith04bd46382008-11-28 22:18:05 +05302545 DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n");
Jouni Malinene022edb2008-08-22 17:31:33 +03002546 dev_kfree_skb_any(skb);
2547 return;
2548 }
2549 skb_push(skb, padsize);
2550 memmove(skb->data, skb->data + padsize, hdrlen);
2551 }
2552
Sujithb77f4832008-12-07 21:44:03 +05302553 txctl.txq = sc->beacon.cabq;
Sujith528f0c62008-10-29 10:14:26 +05302554
Sujith04bd46382008-11-28 22:18:05 +05302555 DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);
Jouni Malinene022edb2008-08-22 17:31:33 +03002556
Sujith528f0c62008-10-29 10:14:26 +05302557 if (ath_tx_start(sc, skb, &txctl) != 0) {
Sujith04bd46382008-11-28 22:18:05 +05302558 DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n");
Sujith528f0c62008-10-29 10:14:26 +05302559 goto exit;
Jouni Malinene022edb2008-08-22 17:31:33 +03002560 }
Jouni Malinene022edb2008-08-22 17:31:33 +03002561
Sujith528f0c62008-10-29 10:14:26 +05302562 return;
2563exit:
2564 dev_kfree_skb_any(skb);
2565}