/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

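/* Reverse-map a DMA address back to its rx buffer. Only used in the full
 * rx reorder (in-order indication) path where the target refers to buffers
 * by physical address rather than by ring index.
 */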
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

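/* Post up to @num fresh rx buffers to the ring, starting at the current
 * alloc index. Runs with rx_ring.lock held (see the locked wrapper below).
 * The updated alloc index is published to the target even on failure so it
 * matches the buffers actually posted.
 */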
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure the ring is always half empty.
	 * This guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 (u32)paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before the available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring FW will
	 * not report RX until it is refilled with enough buffers. This
	 * automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->tx_compl_q);
	skb_queue_purge(&htt->rx_compl_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);

	ath10k_htt_rx_ring_free(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

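/* Claim the next filled buffer from the ring in software read order and
 * unmap it for CPU access. The caller assumes ownership of the returned skb.
 */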
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ring.paddrs_ring[idx] = 0;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report the msdu payload since this is what
		 * the caller expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the actual
			 * MSDUs inside this MPDU. Mark the FW descriptors so
			 * that the frames will still be delivered to the
			 * upper stack, if there is no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

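/* Claim the rx buffer matching @paddr (full rx reorder mode): remove it
 * from the paddr hash table and unmap it for CPU access.
 */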
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u32 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

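/* Gather all MSDUs referenced by an in-order indication event into @list.
 * Non-offloaded frames are trimmed to the advertised msdu_len and checked
 * for the MSDU_DONE attention bit.
 */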
static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
					struct htt_rx_in_ord_ind *ev,
					struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

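/* Allocate rx ring state: the host-side netbuf pointer array, the
 * DMA-coherent address ring and alloc index shared with the target, the
 * refill retry timer and the replenish/completion tasklets.
 */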
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	skb_queue_head_init(&htt->tx_compl_q);
	skb_queue_head_init(&htt->rx_compl_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

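/* Number of bytes of crypto parameters (IV) the hardware leaves between
 * the 802.11 header and the payload for the given cipher, i.e. how much to
 * strip once the frame has been decrypted.
 */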
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

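/* Decode rate info from the PPDU start descriptor into the mac80211 rx
 * status: a legacy rate index, or MCS/NSS/bandwidth/SGI for HT and VHT
 * preambles.
 */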
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get the legacy rate index the band is required. Since
		 * the band can't be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO: check this
		 */
		mcs = (info3 >> 4) & 0x0F;
		nss = ((info2 >> 10) & 0x07) + 1;
		bw = info2 & 3;
		sgi = info3 & 1;

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

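/* Derive the rx channel from the peer index carried in the rx descriptor:
 * peer -> vdev -> channel context definition.
 */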
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

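/* Resolve the channel a frame was received on, falling back through
 * increasingly coarse sources: scan channel, rx channel, then peer-,
 * vdev- and finally any active channel context.
 */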
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

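/* Fill the per-PPDU parts of the rx status (signal, channel, rates,
 * mactime). Per-PPDU fields are reset only on the first MPDU of a PPDU and
 * the TSF is picked up from the last one.
 */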
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->vht_nss = 0;
		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
		status->flag &= ~(RX_FLAG_HT |
				  RX_FLAG_VHT |
				  RX_FLAG_SHORT_GI |
				  RX_FLAG_40MHZ |
				  RX_FLAG_MACTIME_END);
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu)
		ath10k_htt_rx_h_mactime(ar, status, rxd);
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->fw_features))
		len = round_up(len, 4);

	return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This would
	 * also make sense for software based decryption (which is not
	 * implemented in ath10k).
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));

	/* MMIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	memmove((void *)msdu->data + crypto_len,
		(void *)msdu->data, hdr_len);
	skb_pull(msdu, crypto_len);
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

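/* Locate the rfc1042/llc header within the undecapped 802.11 header
 * snapshot kept in rx_hdr_status. The snapshot pads the 802.11 header and
 * crypto parameters to 4 bytes, hence the round_up()s.
 */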
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, 4) +
			   round_up(crypto_len, 4);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

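/* Restore a full 802.11 frame from whichever decap format the hardware
 * delivered: raw, native wifi, ethernet or snap/llc.
 */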
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
		break;
	}
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

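/* Per-MPDU processing: derive cipher and decryption state from the first
 * MSDU, pick up error flags that are only valid in the last MSDU, then
 * undecap each MSDU and update the mac80211 rx flags to match.
 */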
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	size_t hdr_len;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(first_hdr, hdr, hdr_len);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;
	qos = ieee80211_get_qos_ctl(hdr);
	qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted)
		status->flag |= RX_FLAG_DECRYPTED |
				RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

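/* Pass each MSDU of the A-MSDU up to mac80211, flagging all but the last
 * one with RX_FLAG_AMSDU_MORE.
 */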
static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		ath10k_process_rx(ar, status, msdu);
	}
}

static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or a similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skbs?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk the list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    bool chained)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (!chained)
		return;

	/* FIXME: Current unchaining logic can only handle the simple case of
	 * raw msdu chaining. If decapping is other than raw the chaining may
	 * be more complex and this isn't handled by the current code. Don't
	 * even try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu);
}

1460static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1461 struct sk_buff_head *amsdu,
1462 struct ieee80211_rx_status *rx_status)
1463{
1464 struct sk_buff *msdu;
1465 struct htt_rx_desc *rxd;
Michal Kaziord67d0a02014-11-24 15:34:08 +01001466 bool is_mgmt;
1467 bool has_fcs_err;
Michal Kazior581c25f2014-11-18 09:24:48 +02001468
1469 msdu = skb_peek(amsdu);
1470 rxd = (void *)msdu->data - sizeof(*rxd);
1471
1472 /* FIXME: It might be a good idea to do some fuzzy-testing to drop
1473 * invalid/dangerous frames.
1474 */
1475
1476 if (!rx_status->freq) {
1477 ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
Janusz Dziedzic2acc4eb2014-03-19 07:09:40 +01001478 return false;
1479 }
1480
Michal Kaziord67d0a02014-11-24 15:34:08 +01001481 is_mgmt = !!(rxd->attention.flags &
1482 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
1483 has_fcs_err = !!(rxd->attention.flags &
1484 __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
1485
Michal Kazior581c25f2014-11-18 09:24:48 +02001486 /* Management frames are handled via WMI events. The pros of such
1487 * approach is that channel is explicitly provided in WMI events
1488 * whereas HTT doesn't provide channel information for Rxed frames.
Michal Kaziord67d0a02014-11-24 15:34:08 +01001489 *
1490 * However some firmware revisions don't report corrupted frames via
1491 * WMI so don't drop them.
Michal Kazior581c25f2014-11-18 09:24:48 +02001492 */
Michal Kaziord67d0a02014-11-24 15:34:08 +01001493 if (is_mgmt && !has_fcs_err) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001494 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
Janusz Dziedzic2acc4eb2014-03-19 07:09:40 +01001495 return false;
1496 }
1497
Michal Kazior581c25f2014-11-18 09:24:48 +02001498 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1499 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
Janusz Dziedzic2acc4eb2014-03-19 07:09:40 +01001500 return false;
1501 }
1502
1503 return true;
1504}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	__skb_queue_purge(amsdu);
}

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct sk_buff_head amsdu;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, ret, mpdu_count = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
			num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

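	/* Each popped A-MSDU is run through the same fixup pipeline:
	 * ppdu (channel/rate status), unchain, filter, mpdu
	 * (crypto/header undecap) and finally delivery to mac80211.
	 */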
	while (mpdu_count--) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
					      &fw_desc_len, &amsdu);
		if (ret < 0) {
			ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
			__skb_queue_purge(&amsdu);
			/* FIXME: It's probably a good idea to reboot the
			 * device instead of leaving it inoperable.
			 */
			htt->rx_confused = true;
			break;
		}

		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	u8 *fw_desc;
	int fw_desc_len;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
				      &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	tasklet_schedule(&htt->rx_replenish_task);

	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (ret) {
		ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
			    ret);
		__skb_queue_purge(&amsdu);
		return;
	}

	if (skb_queue_len(&amsdu) != 1) {
		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
		__skb_queue_purge(&amsdu);
		return;
	}

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

	if (fw_desc_len > 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

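	/* Collapse the firmware's five completion codes into the
	 * tri-state tx_done (success / no_ack / discard) that
	 * ath10k_txrx_tx_unref() understands.
	 */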
	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.no_ack = true;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.success = true;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.discard = true;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.discard = true;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);
		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

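	/* The BA negotiation itself is handled by the firmware; the
	 * offloaded variant below only asks mac80211 to set up its rx
	 * reorder state for this station/tid.
	 */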
	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

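	/* Dequeue MSDUs until the LAST_MSDU bit in the rx descriptor
	 * marks the end of the current A-MSDU; whatever remains on
	 * @list belongs to the following A-MSDUs.
	 */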
	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they
	 * are protected in the 802.11 header. Strip the flag. Otherwise
	 * mac80211 will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have an rx descriptor. Instead
		 * they have a short meta information header.
		 */

		rx = (void *)msdu->data;
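
		/* The skb_put()/skb_pull() pair below first accounts for
		 * the meta header and then strips it from the head, so
		 * msdu->data ends up just past the header while the skb
		 * length bookkeeping stays consistent.
		 */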
		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't a multiple of 2 or 4
		 * so the actual payload is unaligned. Align the frame.
		 * Otherwise mac80211 complains. This shouldn't reduce
		 * performance much because these offloaded frames are
		 * rare.
		 */
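		/* A worked example with assumed addresses: data ending in
		 * ...0x2 gives offset = 4 - (0x2 & 3) = 2, shifting the
		 * payload onto a 4-byte boundary; an already aligned
		 * buffer gives offset = 4, a harmless whole-word shift.
		 */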
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_process_rx(ar, status, msdu);
	}
}

static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
		     HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

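	/* Make sure the advertised msdu_descs[] array actually fits in
	 * the received buffer before it is walked.
	 */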
	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report
			 * interleaved frames from different PPDUs meaning
			 * the rx rate reported to mac80211 isn't
			 * accurate/reliable. It's still better to report
			 * something than nothing though. This should still
			 * give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return;
		}
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

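	/* HTT message ids differ between firmware revisions, so the raw
	 * id is bounds-checked and then translated through a per-firmware
	 * lookup table into a generic htt_t2h_msg_type.
	 */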
	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		dev_kfree_skb_any(skb);
		return;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.success = true;
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		ath10k_txrx_tx_unref(htt, &tx_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		skb_queue_tail(&htt->tx_compl_q, skb);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it.
		 * ath10k sends all tx frames as already inspected so this
		 * shouldn't happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		struct ath10k_pktlog_hdr *hdr =
			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;

		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					sizeof(*hdr) +
					__le16_to_cpu(hdr->size));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
	case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}

static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct ath10k *ar = htt->ar;
	struct htt_resp *resp;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&htt->tx_compl_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}

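	/* Both rx queues are drained under rx_ring.lock: the handlers
	 * below pop buffers off the rx ring and assert that the lock
	 * is held.
	 */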
	spin_lock_bh(&htt->rx_ring.lock);
	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
		resp = (struct htt_resp *)skb->data;
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		dev_kfree_skb_any(skb);
	}

	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
		ath10k_htt_rx_in_ord_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
Michal Kazior6c5151a2014-02-27 18:50:04 +02002109}