/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

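/* Look up the sk_buff that was DMA-mapped at the given physical address.
 * Used on the in-order (full rx reorder) path where firmware refers to rx
 * buffers by their physical address rather than by ring index.
 */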
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

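/* Allocate, DMA-map and post up to @num rx buffers. The firmware-visible
 * alloc index is advanced only once, after all new buffers have been
 * published to the paddrs ring (see the barrier at the fail label). Callers
 * serialize via the rx ring lock (asserted in the wrapper below).
 */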
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure the ring is always at least half
	 * empty. This guarantees that no replenishment overruns are possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 (u32)paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before the available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up a significant amount of CPU cycles and
	 * starve other tasks, e.g. TX on an ethernet device while acting as
	 * a bridge with the ath10k wlan interface. This ended up with very
	 * poor performance once the host system's CPU was overwhelmed with
	 * RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets
	 * are processed in FIFO order. This means actual RX processing can
	 * starve out refilling. If there are not enough buffers on the RX
	 * ring the FW will not report RX until the ring is refilled with
	 * enough buffers. This automatically balances load with respect to
	 * CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_compl_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	ath10k_htt_rx_ring_free(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

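/* Pop one rx buffer from the ring at the software read index. The buffer is
 * unmapped for CPU access here and the caller takes ownership of the skb.
 */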
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ring.paddrs_ring[idx] = 0;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report the msdu payload since this is what
		 * the caller expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain an rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

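/* In-order rx path: pop the buffer that firmware addressed by its physical
 * address (rather than by ring index) and hand it back to the caller.
 */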
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u32 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
					struct htt_rx_in_ord_ind *ev,
					struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

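/* Allocate the rx ring data structures: the host-side netbuf pointer array,
 * the DMA-coherent ring of buffer addresses shared with the device and the
 * shared alloc index word, and set up the refill retry timer.
 */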
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_compl_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

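/* Length of the per-cipher IV/crypto params that sit between the 802.11
 * header and the payload, i.e. what is stripped from the head of a decrypted
 * frame during undecap.
 */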
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

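/* Decode PHY rate info (legacy rate index, HT MCS or VHT MCS/NSS, bandwidth
 * and short GI) from the ppdu_start portion of the rx descriptor into
 * mac80211's rx status.
 */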
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get the legacy rate index the band is required. Since
		 * the band can't be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO: check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into the Rx
			 * descriptor so it's impossible to decode the MCS.
			 * Also since firmware consumes Group Id Management
			 * frames the host has no knowledge regarding
			 * group/user position mapping so it's impossible to
			 * pick the correct Nsts from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on a best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

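/* Figure out which channel a frame was received on. The rx descriptor doesn't
 * carry one, so fall back through progressively coarser sources: scan
 * channel, current rx channel, the peer's vif channel, the vdev's channel,
 * any active channel context and finally the target's operating channel.
 */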
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

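/* Fill in per-PPDU rx status (signal, channel, rates, mactime). The PHY info
 * is valid only in the first MPDU of a PPDU and the TSF only in the last,
 * hence the first/last attention flag checks.
 */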
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->vht_nss = 0;
		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
		status->flag &= ~(RX_FLAG_HT |
				  RX_FLAG_VHT |
				  RX_FLAG_SHORT_GI |
				  RX_FLAG_40MHZ |
				  RX_FLAG_MACTIME_END);
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu)
		ath10k_htt_rx_h_mactime(ar, status, rxd);
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
							"legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

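/* Undecap a "raw" frame: a full 802.11 frame as received over the air. Only
 * the FCS and - if hardware already decrypted the frame - the crypto params
 * (IV at the head, MIC/ICV at the tail) need to be stripped.
 */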
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_tail_len(ar, enctype));

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - 8);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
		break;
	}
}

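/* Map the rx descriptor's checksum attention bits to the kernel's checksum
 * state: claim CHECKSUM_UNNECESSARY only for TCP/UDP over IPv4/IPv6 frames
 * whose IP and TCP/UDP checksums were both verified by hardware.
 */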
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

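/* Process one MPDU (possibly an A-MSDU): derive the encryption type and
 * error flags from the rx descriptors, set up the rx status accordingly and
 * undecap every MSDU back into an 802.11 frame for mac80211.
 */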
1290static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1291 struct sk_buff_head *amsdu,
1292 struct ieee80211_rx_status *status)
1293{
1294 struct sk_buff *first;
1295 struct sk_buff *last;
1296 struct sk_buff *msdu;
1297 struct htt_rx_desc *rxd;
1298 struct ieee80211_hdr *hdr;
1299 enum htt_rx_mpdu_encrypt_type enctype;
1300 u8 first_hdr[64];
1301 u8 *qos;
1302 size_t hdr_len;
1303 bool has_fcs_err;
1304 bool has_crypto_err;
1305 bool has_tkip_err;
1306 bool has_peer_idx_invalid;
1307 bool is_decrypted;
Grzegorz Bajorski60549ca2015-11-30 13:56:59 +01001308 bool is_mgmt;
Michal Kazior581c25f2014-11-18 09:24:48 +02001309 u32 attention;
1310
1311 if (skb_queue_empty(amsdu))
1312 return;
1313
1314 first = skb_peek(amsdu);
1315 rxd = (void *)first->data - sizeof(*rxd);
1316
Grzegorz Bajorski60549ca2015-11-30 13:56:59 +01001317 is_mgmt = !!(rxd->attention.flags &
1318 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
1319
Michal Kazior581c25f2014-11-18 09:24:48 +02001320 enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
1321 RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1322
1323 /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
1324 * decapped header. It'll be used for undecapping of each MSDU.
1325 */
1326 hdr = (void *)rxd->rx_hdr_status;
1327 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1328 memcpy(first_hdr, hdr, hdr_len);
1329
1330 /* Each A-MSDU subframe will use the original header as the base and be
1331 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1332 */
1333 hdr = (void *)first_hdr;
1334 qos = ieee80211_get_qos_ctl(hdr);
1335 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1336
1337 /* Some attention flags are valid only in the last MSDU. */
1338 last = skb_peek_tail(amsdu);
1339 rxd = (void *)last->data - sizeof(*rxd);
1340 attention = __le32_to_cpu(rxd->attention.flags);
1341
1342 has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
1343 has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
1344 has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
1345 has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
1346
1347 /* Note: If hardware captures an encrypted frame that it can't decrypt,
1348 * e.g. due to fcs error, missing peer or invalid key data it will
1349 * report the frame as raw.
1350 */
1351 is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
1352 !has_fcs_err &&
1353 !has_crypto_err &&
1354 !has_peer_idx_invalid);
1355
1356 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
1357 status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
1358 RX_FLAG_MMIC_ERROR |
1359 RX_FLAG_DECRYPTED |
1360 RX_FLAG_IV_STRIPPED |
Grzegorz Bajorski60549ca2015-11-30 13:56:59 +01001361 RX_FLAG_ONLY_MONITOR |
Michal Kazior581c25f2014-11-18 09:24:48 +02001362 RX_FLAG_MMIC_STRIPPED);
1363
1364 if (has_fcs_err)
1365 status->flag |= RX_FLAG_FAILED_FCS_CRC;
1366
1367 if (has_tkip_err)
1368 status->flag |= RX_FLAG_MMIC_ERROR;
1369
Grzegorz Bajorski60549ca2015-11-30 13:56:59 +01001370 /* Firmware reports all necessary management frames via WMI already.
1371 * They are not reported to monitor interfaces at all so pass the ones
1372 * coming via HTT to monitor interfaces instead. This simplifies
1373 * matters a lot.
1374 */
1375 if (is_mgmt)
1376 status->flag |= RX_FLAG_ONLY_MONITOR;
1377
1378 if (is_decrypted) {
1379 status->flag |= RX_FLAG_DECRYPTED;
1380
1381 if (likely(!is_mgmt))
1382 status->flag |= RX_FLAG_IV_STRIPPED |
1383 RX_FLAG_MMIC_STRIPPED;
1384}
Michal Kazior581c25f2014-11-18 09:24:48 +02001385
1386 skb_queue_walk(amsdu, msdu) {
1387 ath10k_htt_rx_h_csum_offload(msdu);
1388 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
1389 is_decrypted);
1390
1391 /* Undecapping involves copying the original 802.11 header back
1392 * to sk_buff. If frame is protected and hardware has decrypted
1393 * it then remove the protected bit.
1394 */
1395 if (!is_decrypted)
1396 continue;
Grzegorz Bajorski60549ca2015-11-30 13:56:59 +01001397 if (is_mgmt)
1398 continue;
Michal Kazior581c25f2014-11-18 09:24:48 +02001399
1400 hdr = (void *)msdu->data;
1401 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1402 }
1403}
1404
1405static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1406 struct sk_buff_head *amsdu,
1407 struct ieee80211_rx_status *status)
1408{
1409 struct sk_buff *msdu;
1410
1411 while ((msdu = __skb_dequeue(amsdu))) {
1412 /* Setup per-MSDU flags */
1413 if (skb_queue_empty(amsdu))
1414 status->flag &= ~RX_FLAG_AMSDU_MORE;
1415 else
1416 status->flag |= RX_FLAG_AMSDU_MORE;
1417
1418 ath10k_process_rx(ar, status, msdu);
1419 }
1420}
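
/* The delivery loop above keeps RX_FLAG_AMSDU_MORE set on every subframe
 * except the last one popped off the queue. A sketch of the same "all but
 * last" pattern over a plain array (names are hypothetical):
 */
#include <stdbool.h>
#include <stddef.h>

struct sketch_subframe {
	bool amsdu_more; /* true while more subframes of this A-MSDU follow */
};

static void sketch_mark_amsdu_more(struct sketch_subframe *sf, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		sf[i].amsdu_more = (i + 1 < n);
}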
1421
Michal Kazior9aa505d2014-11-18 09:24:47 +02001422static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
Ben Greearbfa35362014-03-03 14:07:09 -08001423{
Michal Kazior9aa505d2014-11-18 09:24:47 +02001424 struct sk_buff *skb, *first;
Ben Greearbfa35362014-03-03 14:07:09 -08001425 int space;
1426 int total_len = 0;
1427
 1428 /* TODO: This could be optimized by using
 1429 * skb_try_coalesce or a similar method to
 1430 * reduce copying, or perhaps mac80211 could
 1431 * provide a way to receive a list of
 1432 * skbs?
1433 */
1434
Michal Kazior9aa505d2014-11-18 09:24:47 +02001435 first = __skb_dequeue(amsdu);
Ben Greearbfa35362014-03-03 14:07:09 -08001436
1437 /* Allocate total length all at once. */
Michal Kazior9aa505d2014-11-18 09:24:47 +02001438 skb_queue_walk(amsdu, skb)
1439 total_len += skb->len;
Ben Greearbfa35362014-03-03 14:07:09 -08001440
Michal Kazior9aa505d2014-11-18 09:24:47 +02001441 space = total_len - skb_tailroom(first);
Ben Greearbfa35362014-03-03 14:07:09 -08001442 if ((space > 0) &&
Michal Kazior9aa505d2014-11-18 09:24:47 +02001443 (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
Ben Greearbfa35362014-03-03 14:07:09 -08001444 /* TODO: bump some rx-oom error stat */
1445 /* put it back together so we can free the
1446 * whole list at once.
1447 */
Michal Kazior9aa505d2014-11-18 09:24:47 +02001448 __skb_queue_head(amsdu, first);
Ben Greearbfa35362014-03-03 14:07:09 -08001449 return -1;
1450 }
1451
 1452 /* Walk the list again, copying each fragment's
 1453 * contents into the first msdu.
1454 */
Michal Kazior9aa505d2014-11-18 09:24:47 +02001455 while ((skb = __skb_dequeue(amsdu))) {
1456 skb_copy_from_linear_data(skb, skb_put(first, skb->len),
1457 skb->len);
1458 dev_kfree_skb_any(skb);
Ben Greearbfa35362014-03-03 14:07:09 -08001459 }
1460
Michal Kazior9aa505d2014-11-18 09:24:47 +02001461 __skb_queue_head(amsdu, first);
Ben Greearbfa35362014-03-03 14:07:09 -08001462 return 0;
1463}
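
/* ath10k_unchain_msdu() grows the first buffer once, then copies every
 * fragment into it and frees the fragments. A userspace analogue, with
 * realloc() standing in for pskb_expand_head() (sketch only, assuming the
 * sketch_buf layout below; not the driver's actual data structures):
 */
#include <stdlib.h>
#include <string.h>

struct sketch_buf {
	unsigned char *data;
	size_t len;
	size_t cap; /* bytes allocated, i.e. len + tailroom */
};

static int sketch_unchain(struct sketch_buf *first,
			  struct sketch_buf *frags, size_t nfrags)
{
	size_t i, total = first->len;
	unsigned char *p;

	for (i = 0; i < nfrags; i++)
		total += frags[i].len;

	if (total > first->cap) {
		p = realloc(first->data, total); /* ~pskb_expand_head() */
		if (!p)
			return -1; /* caller still owns the whole chain */
		first->data = p;
		first->cap = total;
	}

	for (i = 0; i < nfrags; i++) {
		memcpy(first->data + first->len, frags[i].data, frags[i].len);
		first->len += frags[i].len;
		free(frags[i].data); /* ~dev_kfree_skb_any() */
		frags[i].data = NULL;
	}
	return 0;
}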
1464
Michal Kazior581c25f2014-11-18 09:24:48 +02001465static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
1466 struct sk_buff_head *amsdu,
1467 bool chained)
Janusz Dziedzic2acc4eb2014-03-19 07:09:40 +01001468{
Michal Kazior581c25f2014-11-18 09:24:48 +02001469 struct sk_buff *first;
1470 struct htt_rx_desc *rxd;
1471 enum rx_msdu_decap_format decap;
Michal Kazior7aa7a722014-08-25 12:09:38 +02001472
Michal Kazior581c25f2014-11-18 09:24:48 +02001473 first = skb_peek(amsdu);
1474 rxd = (void *)first->data - sizeof(*rxd);
Peter Oh1f5dbfb2015-07-15 19:01:21 -07001475 decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
Michal Kazior581c25f2014-11-18 09:24:48 +02001476 RX_MSDU_START_INFO1_DECAP_FORMAT);
1477
1478 if (!chained)
1479 return;
1480
 1481 /* FIXME: The current unchaining logic can only handle the simple case of
 1482 * raw msdu chaining. If decapping is other than raw, the chaining may be
1483 * more complex and this isn't handled by the current code. Don't even
1484 * try re-constructing such frames - it'll be pretty much garbage.
1485 */
1486 if (decap != RX_MSDU_DECAP_RAW ||
1487 skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
1488 __skb_queue_purge(amsdu);
1489 return;
1490 }
1491
1492 ath10k_unchain_msdu(amsdu);
1493}
1494
1495static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1496 struct sk_buff_head *amsdu,
1497 struct ieee80211_rx_status *rx_status)
1498{
Michal Kazior581c25f2014-11-18 09:24:48 +02001499 /* FIXME: It might be a good idea to do some fuzzy-testing to drop
1500 * invalid/dangerous frames.
1501 */
1502
1503 if (!rx_status->freq) {
1504 ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
Janusz Dziedzic2acc4eb2014-03-19 07:09:40 +01001505 return false;
1506 }
1507
Michal Kazior581c25f2014-11-18 09:24:48 +02001508 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1509 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
Janusz Dziedzic2acc4eb2014-03-19 07:09:40 +01001510 return false;
1511 }
1512
1513 return true;
1514}
1515
Michal Kazior581c25f2014-11-18 09:24:48 +02001516static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1517 struct sk_buff_head *amsdu,
1518 struct ieee80211_rx_status *rx_status)
1519{
1520 if (skb_queue_empty(amsdu))
1521 return;
1522
1523 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
1524 return;
1525
1526 __skb_queue_purge(amsdu);
1527}
1528
Rajkumar Manoharan18235662016-03-22 17:22:14 +05301529static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001530{
Michal Kazior7aa7a722014-08-25 12:09:38 +02001531 struct ath10k *ar = htt->ar;
Ashok Raj Nagarajan237e15d2016-08-19 13:37:37 +03001532 struct ieee80211_rx_status *rx_status = &htt->rx_status;
Michal Kazior9aa505d2014-11-18 09:24:47 +02001533 struct sk_buff_head amsdu;
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001534 int ret, num_msdus;
Rajkumar Manoharan18235662016-03-22 17:22:14 +05301535
1536 __skb_queue_head_init(&amsdu);
1537
1538 spin_lock_bh(&htt->rx_ring.lock);
1539 if (htt->rx_confused) {
1540 spin_unlock_bh(&htt->rx_ring.lock);
1541 return -EIO;
1542 }
1543 ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
1544 spin_unlock_bh(&htt->rx_ring.lock);
1545
1546 if (ret < 0) {
1547 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
1548 __skb_queue_purge(&amsdu);
1549 /* FIXME: It's probably a good idea to reboot the
1550 * device instead of leaving it inoperable.
1551 */
1552 htt->rx_confused = true;
1553 return ret;
1554 }
1555
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001556 num_msdus = skb_queue_len(&amsdu);
Ashok Raj Nagarajan237e15d2016-08-19 13:37:37 +03001557 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
Rajkumar Manoharan18235662016-03-22 17:22:14 +05301558 ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
Ashok Raj Nagarajan237e15d2016-08-19 13:37:37 +03001559 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1560 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
1561 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
Rajkumar Manoharan18235662016-03-22 17:22:14 +05301562
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001563 return num_msdus;
Rajkumar Manoharan18235662016-03-22 17:22:14 +05301564}
1565
Rajkumar Manoharan3128b3d2016-03-22 17:22:15 +05301566static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
1567 struct htt_rx_indication *rx)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001568{
1569 struct ath10k *ar = htt->ar;
Janusz Dziedzic6df92a32014-03-24 21:24:57 +01001570 struct htt_rx_indication_mpdu_range *mpdu_ranges;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001571 int num_mpdu_ranges;
Rajkumar Manoharan18235662016-03-22 17:22:14 +05301572 int i, mpdu_count = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001573
1574 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
1575 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
1576 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
1577
Michal Kazior7aa7a722014-08-25 12:09:38 +02001578 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
Kalle Valo5e3dd152013-06-12 20:52:10 +03001579 rx, sizeof(*rx) +
1580 (sizeof(struct htt_rx_indication_mpdu_range) *
1581 num_mpdu_ranges));
1582
Michal Kaziord5406902014-11-18 09:24:47 +02001583 for (i = 0; i < num_mpdu_ranges; i++)
1584 mpdu_count += mpdu_ranges[i].mpdu_count;
Janusz Dziedzicd84dd602014-03-24 21:23:20 +01001585
Rajkumar Manoharan3128b3d2016-03-22 17:22:15 +05301586 atomic_add(mpdu_count, &htt->num_mpdus_ready);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001587}
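
/* Fields such as NUM_MPDU_RANGES above are packed into 32-bit words and are
 * pulled out with the driver's MS() mask-and-shift helper. An equivalent
 * standalone macro pair; the SK_NUM_RANGES layout is made up for
 * illustration:
 */
#include <stdint.h>

#define SK_NUM_RANGES_MASK 0x0000ff00
#define SK_NUM_RANGES_LSB  8

#define SK_MS(word, prefix) (((word) & prefix##_MASK) >> prefix##_LSB)
#define SK_SM(val, prefix)  (((val) << prefix##_LSB) & prefix##_MASK)

static uint32_t sketch_num_mpdu_ranges(uint32_t info1)
{
	return SK_MS(info1, SK_NUM_RANGES); /* e.g. 0x00001200 -> 0x12 */
}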
1588
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05301589static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
Michal Kazior6c5151a2014-02-27 18:50:04 +02001590 struct sk_buff *skb)
1591{
1592 struct ath10k_htt *htt = &ar->htt;
1593 struct htt_resp *resp = (struct htt_resp *)skb->data;
1594 struct htt_tx_done tx_done = {};
1595 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
1596 __le16 msdu_id;
1597 int i;
1598
1599 switch (status) {
1600 case HTT_DATA_TX_STATUS_NO_ACK:
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05301601 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
Michal Kazior6c5151a2014-02-27 18:50:04 +02001602 break;
1603 case HTT_DATA_TX_STATUS_OK:
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05301604 tx_done.status = HTT_TX_COMPL_STATE_ACK;
Michal Kazior6c5151a2014-02-27 18:50:04 +02001605 break;
1606 case HTT_DATA_TX_STATUS_DISCARD:
1607 case HTT_DATA_TX_STATUS_POSTPONE:
1608 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05301609 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
Michal Kazior6c5151a2014-02-27 18:50:04 +02001610 break;
1611 default:
Michal Kazior7aa7a722014-08-25 12:09:38 +02001612 ath10k_warn(ar, "unhandled tx completion status %d\n", status);
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05301613 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
Michal Kazior6c5151a2014-02-27 18:50:04 +02001614 break;
1615 }
1616
Michal Kazior7aa7a722014-08-25 12:09:38 +02001617 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
Michal Kazior6c5151a2014-02-27 18:50:04 +02001618 resp->data_tx_completion.num_msdus);
1619
1620 for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1621 msdu_id = resp->data_tx_completion.msdus[i];
1622 tx_done.msdu_id = __le16_to_cpu(msdu_id);
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05301623
1624 /* kfifo_put: In practice firmware shouldn't fire off per-CE
 1625 * interrupt and the main interrupt (MSI/MSI-X case) for the same
 1626 * HTC service, so it should be safe to use kfifo_put() without a lock.
1627 *
1628 * From kfifo_put() documentation:
1629 * Note that with only one concurrent reader and one concurrent
 1630 * writer, you don't need extra locking to use these macros.
1631 */
1632 if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
1633 ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
1634 tx_done.msdu_id, tx_done.status);
1635 ath10k_txrx_tx_unref(htt, &tx_done);
1636 }
Michal Kazior6c5151a2014-02-27 18:50:04 +02001637 }
1638}
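
/* The "kfifo_put without a lock" reasoning above rests on the single
 * producer / single consumer property. A minimal SPSC ring with the same
 * contract (exactly one writer thread and one reader thread, power-of-two
 * size), sketched with C11 atomics rather than the kernel's kfifo:
 */
#include <stdatomic.h>
#include <stdbool.h>

#define SK_RING_SZ 64 /* must be a power of two */

struct sketch_ring {
	unsigned int buf[SK_RING_SZ];
	_Atomic unsigned int head; /* written only by the producer */
	_Atomic unsigned int tail; /* written only by the consumer */
};

static bool sketch_put(struct sketch_ring *r, unsigned int v)
{
	unsigned int h = atomic_load_explicit(&r->head, memory_order_relaxed);
	unsigned int t = atomic_load_explicit(&r->tail, memory_order_acquire);

	if (h - t == SK_RING_SZ)
		return false; /* full: mirrors kfifo_put() returning 0 */
	r->buf[h % SK_RING_SZ] = v;
	atomic_store_explicit(&r->head, h + 1, memory_order_release);
	return true;
}

static bool sketch_get(struct sketch_ring *r, unsigned int *v)
{
	unsigned int t = atomic_load_explicit(&r->tail, memory_order_relaxed);
	unsigned int h = atomic_load_explicit(&r->head, memory_order_acquire);

	if (h == t)
		return false; /* empty */
	*v = r->buf[t % SK_RING_SZ];
	atomic_store_explicit(&r->tail, t + 1, memory_order_release);
	return true;
}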
1639
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02001640static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
1641{
1642 struct htt_rx_addba *ev = &resp->rx_addba;
1643 struct ath10k_peer *peer;
1644 struct ath10k_vif *arvif;
1645 u16 info0, tid, peer_id;
1646
1647 info0 = __le16_to_cpu(ev->info0);
1648 tid = MS(info0, HTT_RX_BA_INFO0_TID);
1649 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1650
Michal Kazior7aa7a722014-08-25 12:09:38 +02001651 ath10k_dbg(ar, ATH10K_DBG_HTT,
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02001652 "htt rx addba tid %hu peer_id %hu size %hhu\n",
1653 tid, peer_id, ev->window_size);
1654
1655 spin_lock_bh(&ar->data_lock);
1656 peer = ath10k_peer_find_by_id(ar, peer_id);
1657 if (!peer) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001658 ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02001659 peer_id);
1660 spin_unlock_bh(&ar->data_lock);
1661 return;
1662 }
1663
1664 arvif = ath10k_get_arvif(ar, peer->vdev_id);
1665 if (!arvif) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001666 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02001667 peer->vdev_id);
1668 spin_unlock_bh(&ar->data_lock);
1669 return;
1670 }
1671
Michal Kazior7aa7a722014-08-25 12:09:38 +02001672 ath10k_dbg(ar, ATH10K_DBG_HTT,
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02001673 "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
1674 peer->addr, tid, ev->window_size);
1675
1676 ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1677 spin_unlock_bh(&ar->data_lock);
1678}
1679
1680static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1681{
1682 struct htt_rx_delba *ev = &resp->rx_delba;
1683 struct ath10k_peer *peer;
1684 struct ath10k_vif *arvif;
1685 u16 info0, tid, peer_id;
1686
1687 info0 = __le16_to_cpu(ev->info0);
1688 tid = MS(info0, HTT_RX_BA_INFO0_TID);
1689 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1690
Michal Kazior7aa7a722014-08-25 12:09:38 +02001691 ath10k_dbg(ar, ATH10K_DBG_HTT,
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02001692 "htt rx delba tid %hu peer_id %hu\n",
1693 tid, peer_id);
1694
1695 spin_lock_bh(&ar->data_lock);
1696 peer = ath10k_peer_find_by_id(ar, peer_id);
1697 if (!peer) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001698 ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02001699 peer_id);
1700 spin_unlock_bh(&ar->data_lock);
1701 return;
1702 }
1703
1704 arvif = ath10k_get_arvif(ar, peer->vdev_id);
1705 if (!arvif) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001706 ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02001707 peer->vdev_id);
1708 spin_unlock_bh(&ar->data_lock);
1709 return;
1710 }
1711
Michal Kazior7aa7a722014-08-25 12:09:38 +02001712 ath10k_dbg(ar, ATH10K_DBG_HTT,
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02001713 "htt rx stop rx ba session sta %pM tid %hu\n",
1714 peer->addr, tid);
1715
1716 ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1717 spin_unlock_bh(&ar->data_lock);
1718}
1719
Michal Kaziorc5450702015-01-24 12:14:48 +02001720static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1721 struct sk_buff_head *amsdu)
1722{
1723 struct sk_buff *msdu;
1724 struct htt_rx_desc *rxd;
1725
1726 if (skb_queue_empty(list))
1727 return -ENOBUFS;
1728
1729 if (WARN_ON(!skb_queue_empty(amsdu)))
1730 return -EINVAL;
1731
1732 while ((msdu = __skb_dequeue(list))) {
1733 __skb_queue_tail(amsdu, msdu);
1734
1735 rxd = (void *)msdu->data - sizeof(*rxd);
Peter Oh1f5dbfb2015-07-15 19:01:21 -07001736 if (rxd->msdu_end.common.info0 &
Michal Kaziorc5450702015-01-24 12:14:48 +02001737 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
1738 break;
1739 }
1740
1741 msdu = skb_peek_tail(amsdu);
1742 rxd = (void *)msdu->data - sizeof(*rxd);
Peter Oh1f5dbfb2015-07-15 19:01:21 -07001743 if (!(rxd->msdu_end.common.info0 &
Michal Kaziorc5450702015-01-24 12:14:48 +02001744 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
1745 skb_queue_splice_init(amsdu, list);
1746 return -EAGAIN;
1747 }
1748
1749 return 0;
1750}
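
/* ath10k_htt_rx_extract_amsdu() above moves buffers out of "list" until it
 * sees the LAST_MSDU marker and rolls back with -EAGAIN if the tail never
 * arrived. The same split over an index array, as a sketch (the last_msdu
 * flag stands in for the rx descriptor bit):
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct sketch_msdu {
	bool last_msdu;
};

/* Returns the number of leading entries forming one complete A-MSDU, or
 * -EAGAIN when the chain is still incomplete.
 */
static int sketch_extract_amsdu(const struct sketch_msdu *list, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (list[i].last_msdu)
			return (int)(i + 1);
	return -EAGAIN; /* caller keeps the entries queued */
}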
1751
1752static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
1753 struct sk_buff *skb)
1754{
1755 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1756
1757 if (!ieee80211_has_protected(hdr->frame_control))
1758 return;
1759
1760 /* Offloaded frames are already decrypted but firmware insists they are
1761 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
1762 * will drop the frame.
1763 */
1764
1765 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1766 status->flag |= RX_FLAG_DECRYPTED |
1767 RX_FLAG_IV_STRIPPED |
1768 RX_FLAG_MMIC_STRIPPED;
1769}
1770
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001771static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
1772 struct sk_buff_head *list)
Michal Kaziorc5450702015-01-24 12:14:48 +02001773{
1774 struct ath10k_htt *htt = &ar->htt;
1775 struct ieee80211_rx_status *status = &htt->rx_status;
1776 struct htt_rx_offload_msdu *rx;
1777 struct sk_buff *msdu;
1778 size_t offset;
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001779 int num_msdu = 0;
Michal Kaziorc5450702015-01-24 12:14:48 +02001780
1781 while ((msdu = __skb_dequeue(list))) {
 1782 /* Offloaded frames don't have an Rx descriptor. Instead they have
1783 * a short meta information header.
1784 */
1785
1786 rx = (void *)msdu->data;
1787
1788 skb_put(msdu, sizeof(*rx));
1789 skb_pull(msdu, sizeof(*rx));
1790
1791 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
1792 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
1793 dev_kfree_skb_any(msdu);
1794 continue;
1795 }
1796
1797 skb_put(msdu, __le16_to_cpu(rx->msdu_len));
1798
 1799 /* Offloaded rx header length isn't a multiple of 2 or 4, so the
1800 * actual payload is unaligned. Align the frame. Otherwise
1801 * mac80211 complains. This shouldn't reduce performance much
1802 * because these offloaded frames are rare.
1803 */
1804 offset = 4 - ((unsigned long)msdu->data & 3);
1805 skb_put(msdu, offset);
1806 memmove(msdu->data + offset, msdu->data, msdu->len);
1807 skb_pull(msdu, offset);
1808
1809 /* FIXME: The frame is NWifi. Re-construct QoS Control
1810 * if possible later.
1811 */
1812
1813 memset(status, 0, sizeof(*status));
1814 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1815
1816 ath10k_htt_rx_h_rx_offload_prot(status, msdu);
Michal Kazior500ff9f2015-03-31 10:26:21 +00001817 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
Michal Kaziorc5450702015-01-24 12:14:48 +02001818 ath10k_process_rx(ar, status, msdu);
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001819 num_msdu++;
Michal Kaziorc5450702015-01-24 12:14:48 +02001820 }
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001821 return num_msdu;
Michal Kaziorc5450702015-01-24 12:14:48 +02001822}
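
/* The offload path above nudges an unaligned payload forward so it starts on
 * a 4-byte boundary. The arithmetic in isolation (sketch): for data ending in
 * ...01, offset = 4 - 1 = 3; for already-aligned data the expression yields a
 * full 4-byte shift, which is harmless, just not minimal.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static unsigned char *sketch_align4(unsigned char *data, size_t len,
				    size_t tailroom)
{
	size_t offset = 4 - ((uintptr_t)data & 3);

	if (tailroom < offset)
		return NULL; /* no room to shift; caller must drop or copy */
	memmove(data + offset, data, len); /* regions overlap, hence memmove */
	return data + offset;
}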
1823
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001824static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
Michal Kaziorc5450702015-01-24 12:14:48 +02001825{
1826 struct ath10k_htt *htt = &ar->htt;
1827 struct htt_resp *resp = (void *)skb->data;
1828 struct ieee80211_rx_status *status = &htt->rx_status;
1829 struct sk_buff_head list;
1830 struct sk_buff_head amsdu;
1831 u16 peer_id;
1832 u16 msdu_count;
1833 u8 vdev_id;
1834 u8 tid;
1835 bool offload;
1836 bool frag;
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001837 int ret, num_msdus = 0;
Michal Kaziorc5450702015-01-24 12:14:48 +02001838
1839 lockdep_assert_held(&htt->rx_ring.lock);
1840
1841 if (htt->rx_confused)
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001842 return -EIO;
Michal Kaziorc5450702015-01-24 12:14:48 +02001843
1844 skb_pull(skb, sizeof(resp->hdr));
1845 skb_pull(skb, sizeof(resp->rx_in_ord_ind));
1846
1847 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
1848 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
1849 vdev_id = resp->rx_in_ord_ind.vdev_id;
 1850 tid = MS(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
1851 offload = !!(resp->rx_in_ord_ind.info &
1852 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
1853 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
1854
1855 ath10k_dbg(ar, ATH10K_DBG_HTT,
1856 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
1857 vdev_id, peer_id, tid, offload, frag, msdu_count);
1858
1859 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
1860 ath10k_warn(ar, "dropping invalid in order rx indication\n");
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001861 return -EINVAL;
Michal Kaziorc5450702015-01-24 12:14:48 +02001862 }
1863
1864 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
1865 * extracted and processed.
1866 */
1867 __skb_queue_head_init(&list);
1868 ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
1869 if (ret < 0) {
1870 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
1871 htt->rx_confused = true;
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001872 return -EIO;
Michal Kaziorc5450702015-01-24 12:14:48 +02001873 }
1874
1875 /* Offloaded frames are very different and need to be handled
1876 * separately.
1877 */
1878 if (offload)
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001879 num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
Michal Kaziorc5450702015-01-24 12:14:48 +02001880
1881 while (!skb_queue_empty(&list)) {
1882 __skb_queue_head_init(&amsdu);
1883 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
1884 switch (ret) {
1885 case 0:
1886 /* Note: The in-order indication may report interleaved
 1887 * frames from different PPDUs, meaning the rx rate reported
 1888 * to mac80211 isn't accurate/reliable. It's still
1889 * better to report something than nothing though. This
1890 * should still give an idea about rx rate to the user.
1891 */
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001892 num_msdus += skb_queue_len(&amsdu);
Michal Kazior500ff9f2015-03-31 10:26:21 +00001893 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
Michal Kaziorc5450702015-01-24 12:14:48 +02001894 ath10k_htt_rx_h_filter(ar, &amsdu, status);
1895 ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
1896 ath10k_htt_rx_h_deliver(ar, &amsdu, status);
1897 break;
1898 case -EAGAIN:
1899 /* fall through */
1900 default:
1901 /* Should not happen. */
1902 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
1903 htt->rx_confused = true;
1904 __skb_queue_purge(&list);
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001905 return -EIO;
Michal Kaziorc5450702015-01-24 12:14:48 +02001906 }
1907 }
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03001908 return num_msdus;
Michal Kaziorc5450702015-01-24 12:14:48 +02001909}
1910
Michal Kazior839ae632016-03-06 16:14:32 +02001911static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
1912 const __le32 *resp_ids,
1913 int num_resp_ids)
1914{
1915 int i;
1916 u32 resp_id;
1917
1918 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
1919 num_resp_ids);
1920
1921 for (i = 0; i < num_resp_ids; i++) {
1922 resp_id = le32_to_cpu(resp_ids[i]);
1923
1924 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
1925 resp_id);
1926
1927 /* TODO: free resp_id */
1928 }
1929}
1930
1931static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
1932{
Michal Kazior426e10e2016-03-06 16:14:43 +02001933 struct ieee80211_hw *hw = ar->hw;
1934 struct ieee80211_txq *txq;
Michal Kazior839ae632016-03-06 16:14:32 +02001935 struct htt_resp *resp = (struct htt_resp *)skb->data;
1936 struct htt_tx_fetch_record *record;
1937 size_t len;
1938 size_t max_num_bytes;
1939 size_t max_num_msdus;
Michal Kazior426e10e2016-03-06 16:14:43 +02001940 size_t num_bytes;
1941 size_t num_msdus;
Michal Kazior839ae632016-03-06 16:14:32 +02001942 const __le32 *resp_ids;
1943 u16 num_records;
1944 u16 num_resp_ids;
1945 u16 peer_id;
1946 u8 tid;
Michal Kazior426e10e2016-03-06 16:14:43 +02001947 int ret;
Michal Kazior839ae632016-03-06 16:14:32 +02001948 int i;
1949
1950 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
1951
1952 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
1953 if (unlikely(skb->len < len)) {
1954 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
1955 return;
1956 }
1957
1958 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
1959 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
1960
1961 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
1962 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
1963
1964 if (unlikely(skb->len < len)) {
1965 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
1966 return;
1967 }
1968
1969 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
1970 num_records, num_resp_ids,
1971 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
1972
Michal Kazior426e10e2016-03-06 16:14:43 +02001973 if (!ar->htt.tx_q_state.enabled) {
1974 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
1975 return;
1976 }
1977
1978 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
1979 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
1980 return;
1981 }
1982
1983 rcu_read_lock();
Michal Kazior839ae632016-03-06 16:14:32 +02001984
1985 for (i = 0; i < num_records; i++) {
1986 record = &resp->tx_fetch_ind.records[i];
1987 peer_id = MS(le16_to_cpu(record->info),
1988 HTT_TX_FETCH_RECORD_INFO_PEER_ID);
1989 tid = MS(le16_to_cpu(record->info),
1990 HTT_TX_FETCH_RECORD_INFO_TID);
1991 max_num_msdus = le16_to_cpu(record->num_msdus);
1992 max_num_bytes = le32_to_cpu(record->num_bytes);
1993
1994 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
1995 i, peer_id, tid, max_num_msdus, max_num_bytes);
1996
1997 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
1998 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
1999 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2000 peer_id, tid);
2001 continue;
2002 }
2003
Michal Kazior426e10e2016-03-06 16:14:43 +02002004 spin_lock_bh(&ar->data_lock);
2005 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2006 spin_unlock_bh(&ar->data_lock);
2007
2008 /* It is okay to release the lock and use txq because RCU read
2009 * lock is held.
2010 */
2011
2012 if (unlikely(!txq)) {
2013 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2014 peer_id, tid);
2015 continue;
2016 }
2017
2018 num_msdus = 0;
2019 num_bytes = 0;
2020
2021 while (num_msdus < max_num_msdus &&
2022 num_bytes < max_num_bytes) {
2023 ret = ath10k_mac_tx_push_txq(hw, txq);
2024 if (ret < 0)
2025 break;
2026
2027 num_msdus++;
2028 num_bytes += ret;
2029 }
2030
2031 record->num_msdus = cpu_to_le16(num_msdus);
2032 record->num_bytes = cpu_to_le32(num_bytes);
2033
2034 ath10k_htt_tx_txq_recalc(hw, txq);
Michal Kazior839ae632016-03-06 16:14:32 +02002035 }
2036
Michal Kazior426e10e2016-03-06 16:14:43 +02002037 rcu_read_unlock();
2038
Michal Kazior839ae632016-03-06 16:14:32 +02002039 resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
2040 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
2041
Michal Kazior426e10e2016-03-06 16:14:43 +02002042 ret = ath10k_htt_tx_fetch_resp(ar,
2043 resp->tx_fetch_ind.token,
2044 resp->tx_fetch_ind.fetch_seq_num,
2045 resp->tx_fetch_ind.records,
2046 num_records);
2047 if (unlikely(ret)) {
2048 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
2049 le32_to_cpu(resp->tx_fetch_ind.token), ret);
2050 /* FIXME: request fw restart */
2051 }
2052
2053 ath10k_htt_tx_txq_sync(ar);
Michal Kazior839ae632016-03-06 16:14:32 +02002054}
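
/* The fetch handler above drains a txq under two ceilings at once (an MSDU
 * count and a byte count) and reports back how much it actually pushed. The
 * control shape in isolation; sketch_push() is a hypothetical stand-in for
 * ath10k_mac_tx_push_txq(), returning bytes pushed or a negative error:
 */
static void sketch_fetch_drain(int (*sketch_push)(void *txq), void *txq,
			       unsigned int max_msdus, unsigned int max_bytes,
			       unsigned int *num_msdus, unsigned int *num_bytes)
{
	int ret;

	*num_msdus = 0;
	*num_bytes = 0;

	while (*num_msdus < max_msdus && *num_bytes < max_bytes) {
		ret = sketch_push(txq);
		if (ret < 0)
			break; /* queue empty or tx path backpressured */
		(*num_msdus)++;
		*num_bytes += (unsigned int)ret;
	}
}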
2055
2056static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
2057 struct sk_buff *skb)
2058{
2059 const struct htt_resp *resp = (void *)skb->data;
2060 size_t len;
2061 int num_resp_ids;
2062
2063 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
2064
2065 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
2066 if (unlikely(skb->len < len)) {
2067 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
2068 return;
2069 }
2070
2071 num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
2072 len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
2073
2074 if (unlikely(skb->len < len)) {
2075 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
2076 return;
2077 }
2078
2079 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
2080 resp->tx_fetch_confirm.resp_ids,
2081 num_resp_ids);
2082}
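
/* Both fetch handlers validate variable-length events in two stages: first
 * the fixed header must fit, then the header-declared array must fit. A
 * generic sketch of that pattern (the sketch_hdr layout is illustrative):
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct sketch_hdr {
	uint16_t num_ids; /* count of trailing 32-bit resp ids */
};

static int sketch_validate(const uint8_t *buf, size_t buf_len)
{
	struct sketch_hdr hdr;
	size_t need = sizeof(hdr);

	if (buf_len < need) /* stage 1: is the fixed part present? */
		return -1;
	memcpy(&hdr, buf, sizeof(hdr));

	need += (size_t)hdr.num_ids * sizeof(uint32_t);
	if (buf_len < need) /* stage 2: is the declared array present? */
		return -1;
	return 0;
}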
2083
2084static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
2085 struct sk_buff *skb)
2086{
2087 const struct htt_resp *resp = (void *)skb->data;
2088 const struct htt_tx_mode_switch_record *record;
Michal Kazior426e10e2016-03-06 16:14:43 +02002089 struct ieee80211_txq *txq;
2090 struct ath10k_txq *artxq;
Michal Kazior839ae632016-03-06 16:14:32 +02002091 size_t len;
2092 size_t num_records;
2093 enum htt_tx_mode_switch_mode mode;
2094 bool enable;
2095 u16 info0;
2096 u16 info1;
2097 u16 threshold;
2098 u16 peer_id;
2099 u8 tid;
2100 int i;
2101
2102 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
2103
2104 len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
2105 if (unlikely(skb->len < len)) {
2106 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
2107 return;
2108 }
2109
2110 info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
2111 info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
2112
2113 enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
 2114 num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
2115 mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
2116 threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
2117
2118 ath10k_dbg(ar, ATH10K_DBG_HTT,
2119 "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
2120 info0, info1, enable, num_records, mode, threshold);
2121
2122 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
2123
2124 if (unlikely(skb->len < len)) {
2125 ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
2126 return;
2127 }
2128
2129 switch (mode) {
2130 case HTT_TX_MODE_SWITCH_PUSH:
2131 case HTT_TX_MODE_SWITCH_PUSH_PULL:
2132 break;
2133 default:
2134 ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
2135 mode);
2136 return;
2137 }
2138
2139 if (!enable)
2140 return;
2141
Michal Kazior426e10e2016-03-06 16:14:43 +02002142 ar->htt.tx_q_state.enabled = enable;
2143 ar->htt.tx_q_state.mode = mode;
2144 ar->htt.tx_q_state.num_push_allowed = threshold;
2145
2146 rcu_read_lock();
Michal Kazior839ae632016-03-06 16:14:32 +02002147
2148 for (i = 0; i < num_records; i++) {
2149 record = &resp->tx_mode_switch_ind.records[i];
2150 info0 = le16_to_cpu(record->info0);
2151 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
2152 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
2153
2154 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2155 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2156 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2157 peer_id, tid);
2158 continue;
2159 }
2160
Michal Kazior426e10e2016-03-06 16:14:43 +02002161 spin_lock_bh(&ar->data_lock);
2162 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2163 spin_unlock_bh(&ar->data_lock);
2164
2165 /* It is okay to release the lock and use txq because RCU read
2166 * lock is held.
2167 */
2168
2169 if (unlikely(!txq)) {
2170 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2171 peer_id, tid);
2172 continue;
2173 }
2174
2175 spin_lock_bh(&ar->htt.tx_lock);
2176 artxq = (void *)txq->drv_priv;
2177 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
2178 spin_unlock_bh(&ar->htt.tx_lock);
Michal Kazior839ae632016-03-06 16:14:32 +02002179 }
2180
Michal Kazior426e10e2016-03-06 16:14:43 +02002181 rcu_read_unlock();
2182
2183 ath10k_mac_tx_push_pending(ar);
Michal Kazior839ae632016-03-06 16:14:32 +02002184}
2185
Rajkumar Manoharane3a91f82016-03-22 17:22:16 +05302186void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2187{
2188 bool release;
2189
2190 release = ath10k_htt_t2h_msg_handler(ar, skb);
2191
2192 /* Free the indication buffer */
2193 if (release)
2194 dev_kfree_skb_any(skb);
2195}
2196
2197bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002198{
Michal Kazioredb82362013-07-05 16:15:14 +03002199 struct ath10k_htt *htt = &ar->htt;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002200 struct htt_resp *resp = (struct htt_resp *)skb->data;
Rajkumar Manoharan8348db22015-03-25 13:12:27 +02002201 enum htt_t2h_msg_type type;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002202
2203 /* confirm alignment */
2204 if (!IS_ALIGNED((unsigned long)skb->data, 4))
Michal Kazior7aa7a722014-08-25 12:09:38 +02002205 ath10k_warn(ar, "unaligned htt message, expect trouble\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002206
Michal Kazior7aa7a722014-08-25 12:09:38 +02002207 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002208 resp->hdr.msg_type);
Rajkumar Manoharan8348db22015-03-25 13:12:27 +02002209
2210 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
2211 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
2212 resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
Rajkumar Manoharane3a91f82016-03-22 17:22:16 +05302213 return true;
Rajkumar Manoharan8348db22015-03-25 13:12:27 +02002214 }
2215 type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
2216
2217 switch (type) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03002218 case HTT_T2H_MSG_TYPE_VERSION_CONF: {
2219 htt->target_version_major = resp->ver_resp.major;
2220 htt->target_version_minor = resp->ver_resp.minor;
2221 complete(&htt->target_version_received);
2222 break;
2223 }
Michal Kazior6c5151a2014-02-27 18:50:04 +02002224 case HTT_T2H_MSG_TYPE_RX_IND:
Rajkumar Manoharan3128b3d2016-03-22 17:22:15 +05302225 ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
2226 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002227 case HTT_T2H_MSG_TYPE_PEER_MAP: {
2228 struct htt_peer_map_event ev = {
2229 .vdev_id = resp->peer_map.vdev_id,
2230 .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
2231 };
2232 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
2233 ath10k_peer_map_event(htt, &ev);
2234 break;
2235 }
2236 case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
2237 struct htt_peer_unmap_event ev = {
2238 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
2239 };
2240 ath10k_peer_unmap_event(htt, &ev);
2241 break;
2242 }
2243 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
2244 struct htt_tx_done tx_done = {};
2245 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
2246
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05302247 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002248
2249 switch (status) {
2250 case HTT_MGMT_TX_STATUS_OK:
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05302251 tx_done.status = HTT_TX_COMPL_STATE_ACK;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002252 break;
2253 case HTT_MGMT_TX_STATUS_RETRY:
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05302254 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002255 break;
2256 case HTT_MGMT_TX_STATUS_DROP:
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05302257 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002258 break;
2259 }
2260
Rajkumar Manoharancac08552016-03-09 20:25:46 +05302261 status = ath10k_txrx_tx_unref(htt, &tx_done);
2262 if (!status) {
2263 spin_lock_bh(&htt->tx_lock);
2264 ath10k_htt_tx_mgmt_dec_pending(htt);
2265 spin_unlock_bh(&htt->tx_lock);
2266 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002267 break;
2268 }
Michal Kazior6c5151a2014-02-27 18:50:04 +02002269 case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05302270 ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05302271 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002272 case HTT_T2H_MSG_TYPE_SEC_IND: {
2273 struct ath10k *ar = htt->ar;
2274 struct htt_security_indication *ev = &resp->security_indication;
2275
Michal Kazior7aa7a722014-08-25 12:09:38 +02002276 ath10k_dbg(ar, ATH10K_DBG_HTT,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002277 "sec ind peer_id %d unicast %d type %d\n",
2278 __le16_to_cpu(ev->peer_id),
2279 !!(ev->flags & HTT_SECURITY_IS_UNICAST),
2280 MS(ev->flags, HTT_SECURITY_TYPE));
2281 complete(&ar->install_key_done);
2282 break;
2283 }
2284 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002285 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002286 skb->data, skb->len);
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03002287 atomic_inc(&htt->num_mpdus_ready);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002288 break;
2289 }
2290 case HTT_T2H_MSG_TYPE_TEST:
Kalle Valo5e3dd152013-06-12 20:52:10 +03002291 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002292 case HTT_T2H_MSG_TYPE_STATS_CONF:
Michal Kaziord35a6c12014-09-02 11:00:21 +03002293 trace_ath10k_htt_stats(ar, skb->data, skb->len);
Kalle Valoa9bf0502013-09-03 11:43:55 +03002294 break;
2295 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
Michal Kazior708b9bd2014-07-21 20:52:59 +03002296 /* Firmware can return tx frames if it's unable to fully
 2297 * process them and suspects the host may be able to fix it. ath10k
2298 * sends all tx frames as already inspected so this shouldn't
2299 * happen unless fw has a bug.
2300 */
Michal Kazior7aa7a722014-08-25 12:09:38 +02002301 ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
Michal Kazior708b9bd2014-07-21 20:52:59 +03002302 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002303 case HTT_T2H_MSG_TYPE_RX_ADDBA:
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02002304 ath10k_htt_rx_addba(ar, resp);
2305 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002306 case HTT_T2H_MSG_TYPE_RX_DELBA:
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02002307 ath10k_htt_rx_delba(ar, resp);
2308 break;
Rajkumar Manoharanbfdd7932014-10-03 08:02:40 +03002309 case HTT_T2H_MSG_TYPE_PKTLOG: {
Rajkumar Manoharanbfdd7932014-10-03 08:02:40 +03002310 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
Ashok Raj Nagarajan34293f72016-06-30 15:23:55 +03002311 skb->len -
2312 offsetof(struct htt_resp,
2313 pktlog_msg.payload));
Rajkumar Manoharanbfdd7932014-10-03 08:02:40 +03002314 break;
2315 }
Michal Kazioraa5b4fb2014-07-23 12:20:33 +02002316 case HTT_T2H_MSG_TYPE_RX_FLUSH: {
2317 /* Ignore this event because mac80211 takes care of Rx
2318 * aggregation reordering.
2319 */
2320 break;
2321 }
Michal Kaziorc5450702015-01-24 12:14:48 +02002322 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03002323 __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
Rajkumar Manoharane3a91f82016-03-22 17:22:16 +05302324 return false;
Michal Kaziorc5450702015-01-24 12:14:48 +02002325 }
2326 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
Rajkumar Manoharan8348db22015-03-25 13:12:27 +02002327 break;
Rajkumar Manoharan2ce9b252016-03-08 22:57:23 +05302328 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
2329 u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
2330 u32 freq = __le32_to_cpu(resp->chan_change.freq);
2331
2332 ar->tgt_oper_chan =
2333 __ieee80211_get_channel(ar->hw->wiphy, freq);
2334 ath10k_dbg(ar, ATH10K_DBG_HTT,
2335 "htt chan change freq %u phymode %s\n",
2336 freq, ath10k_wmi_phymode_str(phymode));
Michal Kaziorc5450702015-01-24 12:14:48 +02002337 break;
Rajkumar Manoharan2ce9b252016-03-08 22:57:23 +05302338 }
David Liuccec9032015-07-24 20:25:32 +03002339 case HTT_T2H_MSG_TYPE_AGGR_CONF:
2340 break;
Rajkumar Manoharanb2fdbcc2016-03-22 17:22:12 +05302341 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
2342 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
2343
2344 if (!tx_fetch_ind) {
2345 ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
2346 break;
2347 }
2348 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
Rajkumar Manoharanb2fdbcc2016-03-22 17:22:12 +05302349 break;
2350 }
Michal Kaziordf94e702016-01-21 14:13:23 +01002351 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
Michal Kazior839ae632016-03-06 16:14:32 +02002352 ath10k_htt_rx_tx_fetch_confirm(ar, skb);
2353 break;
Michal Kaziordf94e702016-01-21 14:13:23 +01002354 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
Michal Kazior839ae632016-03-06 16:14:32 +02002355 ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
Michal Kazior9b158732016-01-21 14:13:27 +01002356 break;
2357 case HTT_T2H_MSG_TYPE_EN_STATS:
Kalle Valo5e3dd152013-06-12 20:52:10 +03002358 default:
Michal Kazior2358a542014-10-02 13:32:55 +02002359 ath10k_warn(ar, "htt event (%d) not handled\n",
2360 resp->hdr.msg_type);
Michal Kazior7aa7a722014-08-25 12:09:38 +02002361 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002362 skb->data, skb->len);
2363 break;
 2364 }
Rajkumar Manoharane3a91f82016-03-22 17:22:16 +05302365 return true;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002366}
Rajkumar Manoharan3f0f7ed2015-10-12 18:27:03 +05302367EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
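
/* The dispatcher above never switches on the raw wire value directly: it
 * bounds-checks resp->hdr.msg_type, then maps it through a per-firmware
 * translation table. The lookup pattern in isolation (enum values and table
 * contents are illustrative, not any real firmware's ids):
 */
enum sketch_event { SK_EV_RX_IND, SK_EV_TX_COMPL, SK_EV_UNKNOWN };

static const enum sketch_event sketch_t2h_map[] = {
	[0] = SK_EV_RX_IND,   /* wire id 0 on this hypothetical firmware */
	[1] = SK_EV_TX_COMPL, /* wire id 1 */
};

static enum sketch_event sketch_translate(unsigned int wire_id)
{
	if (wire_id >= sizeof(sketch_t2h_map) / sizeof(sketch_t2h_map[0]))
		return SK_EV_UNKNOWN; /* like the t2h_msg_types_max check */
	return sketch_t2h_map[wire_id];
}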
Michal Kazior6c5151a2014-02-27 18:50:04 +02002368
Vivek Natarajanafb0bf72015-10-30 14:57:58 +05302369void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
2370 struct sk_buff *skb)
2371{
Ashok Raj Nagarajan53a5c9b2016-02-05 21:12:48 +05302372 trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
Vivek Natarajanafb0bf72015-10-30 14:57:58 +05302373 dev_kfree_skb_any(skb);
2374}
2375EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
2376
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03002377int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
Michal Kazior6c5151a2014-02-27 18:50:04 +02002378{
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03002379 struct ath10k_htt *htt = &ar->htt;
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05302380 struct htt_tx_done tx_done = {};
Michal Kazior426e10e2016-03-06 16:14:43 +02002381 struct sk_buff_head tx_ind_q;
Michal Kazior6c5151a2014-02-27 18:50:04 +02002382 struct sk_buff *skb;
Michal Kaziord742c962016-01-13 14:52:52 +01002383 unsigned long flags;
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03002384 int quota = 0, done, num_rx_msdus;
2385 bool resched_napi = false;
Michal Kazior6c5151a2014-02-27 18:50:04 +02002386
Michal Kazior426e10e2016-03-06 16:14:43 +02002387 __skb_queue_head_init(&tx_ind_q);
Rajkumar Manoharanda6416c2016-02-12 11:40:59 +05302388
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03002389 /* Since an in-order indication can deliver more than one A-MSDU in a
 2390 * single event, process it first to utilize the full available quota.
2391 */
2392 while (quota < budget) {
2393 if (skb_queue_empty(&htt->rx_in_ord_compl_q))
2394 break;
Rajkumar Manoharanda6416c2016-02-12 11:40:59 +05302395
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03002396 skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
2397 if (!skb) {
2398 resched_napi = true;
2399 goto exit;
2400 }
2401
2402 spin_lock_bh(&htt->rx_ring.lock);
2403 num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
2404 spin_unlock_bh(&htt->rx_ring.lock);
2405 if (num_rx_msdus < 0) {
2406 resched_napi = true;
2407 goto exit;
2408 }
2409
2410 dev_kfree_skb_any(skb);
2411 if (num_rx_msdus > 0)
2412 quota += num_rx_msdus;
2413
2414 if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
2415 !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
2416 resched_napi = true;
2417 goto exit;
2418 }
2419 }
2420
2421 while (quota < budget) {
2422 /* no more data to receive */
2423 if (!atomic_read(&htt->num_mpdus_ready))
2424 break;
2425
2426 num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
2427 if (num_rx_msdus < 0) {
2428 resched_napi = true;
2429 goto exit;
2430 }
2431
2432 quota += num_rx_msdus;
2433 atomic_dec(&htt->num_mpdus_ready);
2434 if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
2435 atomic_read(&htt->num_mpdus_ready)) {
2436 resched_napi = true;
2437 goto exit;
2438 }
2439 }
2440
2441 /* From NAPI documentation:
2442 * The napi poll() function may also process TX completions, in which
2443 * case if it processes the entire TX ring then it should count that
2444 * work as the rest of the budget.
2445 */
2446 if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
2447 quota = budget;
Michal Kazior426e10e2016-03-06 16:14:43 +02002448
Rajkumar Manoharan59465fe2016-03-22 17:22:11 +05302449 /* kfifo_get: called only within this NAPI poll so it's neatly serialized.
2450 * From kfifo_get() documentation:
2451 * Note that with only one concurrent reader and one concurrent writer,
 2452 * you don't need extra locking to use these macros.
2453 */
2454 while (kfifo_get(&htt->txdone_fifo, &tx_done))
2455 ath10k_txrx_tx_unref(htt, &tx_done);
Michal Kazior6c5151a2014-02-27 18:50:04 +02002456
Rajkumar Manoharan18f53fe2016-09-02 19:46:10 +03002457 ath10k_mac_tx_push_pending(ar);
2458
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03002459 spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
2460 skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
2461 spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
2462
Michal Kazior426e10e2016-03-06 16:14:43 +02002463 while ((skb = __skb_dequeue(&tx_ind_q))) {
2464 ath10k_htt_rx_tx_fetch_ind(ar, skb);
Michal Kazior6c5151a2014-02-27 18:50:04 +02002465 dev_kfree_skb_any(skb);
2466 }
2467
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03002468exit:
Rajkumar Manoharan5c86d972016-03-22 17:22:19 +05302469 ath10k_htt_rx_msdu_buff_replenish(htt);
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03002470 /* In case of an rx failure or more data to read, report the full
 2471 * budget so that the NAPI poll is rescheduled.
2472 */
2473 done = resched_napi ? budget : quota;
2474
2475 return done;
Michal Kazior6c5151a2014-02-27 18:50:04 +02002476}
Rajkumar Manoharan3c97f5d2016-09-02 19:46:09 +03002477EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
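
/* The poll function above folds rx work, tx completions and fetch
 * indications into one NAPI budget: it returns the full budget when it wants
 * to be polled again and the consumed quota otherwise. That terminal
 * decision, reduced to its essentials (sketch):
 */
static int sketch_napi_done(int budget, int quota, int need_resched)
{
	/* Returning a value equal to the budget tells NAPI there may be more
	 * work; returning less lets it complete and re-enable interrupts.
	 */
	return need_resched ? budget : quota;
}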