/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size = htt->max_throughput_mbps *
	       1000 /
	       (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}

static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size = htt->max_throughput_mbps *
	       1000 /
	       (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}

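/* Worked example for the two sizing formulas above (illustrative
 * throughput figure, not taken from the driver): with
 * max_throughput_mbps = 800 the ring size evaluates to
 * 800 * 1000 / (8 * 1000) * 20 = 2000, which roundup_pow_of_two()
 * raises to 2048 (HTT_RX_RING_SIZE_MAX), while the fill level uses
 * the 10 ms worst-likely latency instead:
 * 800 * 1000 / (8 * 1000) * 10 = 1000 buffers.
 */
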
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

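/* Index handling sketch for the fill loop above (assuming a
 * 2048-entry ring): size_mask is 0x7ff, so "idx &= size_mask" folds
 * idx 2048 back to 0. The FW-visible producer index
 * (*alloc_idx.vaddr) is written only once, after the loop, so the FW
 * never observes a partially posted batch of buffers.
 */
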
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up a significant amount of CPU cycles and
	 * starve other tasks, e.g. TX on an ethernet device while acting as
	 * a bridge with the ath10k wlan interface. This ended up with very
	 * poor performance once the host system's CPU was overwhelmed with
	 * RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring the FW
	 * will not report RX until it is refilled with enough buffers. This
	 * automatically balances load wrt CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < htt->rx_ring.size; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		if (!skb)
			continue;

		dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		htt->rx_ring.netbufs_ring[i] = NULL;
	}
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->tx_compl_q);
	skb_queue_purge(&htt->rx_compl_q);

	ath10k_htt_rx_ring_clean_up(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_CB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that the frames will still be
			 * delivered to the upper stack if there is no CRC
			 * error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

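/* Caller-side sketch of the return convention above (hypothetical
 * caller, mirroring the handlers further down):
 *
 *	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, &amsdu);
 *	if (ret < 0)		// -ENOENT/-EIO: ring is corrupted, stop rx
 *		...
 *	else if (ret == 1)	// at least one msdu spans chained buffers
 *		...		// and needs unchaining before delivery
 *	// ret == 0: every msdu fit in a single rx buffer
 */
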
static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	skb_queue_head_init(&htt->tx_compl_q);
	skb_queue_head_init(&htt->rx_compl_q);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

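/* Allocation layout sketch for the setup above (descriptive, not
 * additional driver logic): the coherent paddrs ring holds one DMA
 * address per rx buffer for the FW to consume, alloc_idx is a single
 * shared word the host advances as the producer index, and
 * netbufs_ring shadows the paddrs ring with the matching sk_buff
 * pointers so buffers can later be unmapped and freed by index.
 */
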
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

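/* Per-cipher overheads returned by the two helpers above, using the
 * mac80211 constants (head = IV/crypto params, tail = ICV/MIC):
 *
 *	WEP40/WEP104:	head 4, tail 4
 *	TKIP:		head 8, tail 4 (the 8-byte Michael MIC is
 *			stripped separately)
 *	CCMP:		head 8, tail 8
 */
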
struct rfc1042_hdr {
	u8 llc_dsap;
	u8 llc_ssap;
	u8 llc_ctrl;
	u8 snap_oui[3];
	__be16 snap_type;
} __packed;

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

static const u8 rx_legacy_rate_idx[] = {
	3,	/* 0x00  - 11Mbps  */
	2,	/* 0x01  - 5.5Mbps */
	1,	/* 0x02  - 2Mbps   */
	0,	/* 0x03  - 1Mbps   */
	3,	/* 0x04  - 11Mbps  */
	2,	/* 0x05  - 5.5Mbps */
	1,	/* 0x06  - 2Mbps   */
	0,	/* 0x07  - 1Mbps   */
	10,	/* 0x08  - 48Mbps  */
	8,	/* 0x09  - 24Mbps  */
	6,	/* 0x0A  - 12Mbps  */
	4,	/* 0x0B  - 6Mbps   */
	11,	/* 0x0C  - 54Mbps  */
	9,	/* 0x0D  - 36Mbps  */
	7,	/* 0x0E  - 18Mbps  */
	5,	/* 0x0F  - 9Mbps   */
};

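/* Example lookup (values straight from the table above): an OFDM rate
 * code of 0x0B maps to index 4, the 6 Mbps entry of the rate table
 * registered with mac80211, while a CCK code of 0x00 maps to index 3,
 * the 11 Mbps entry.
 */
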
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	enum ieee80211_band band;
	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u32 info1, info2, info3;

	/* Band value can't be set as undefined but freq can be 0 - use that to
	 * determine whether band is provided.
	 *
	 * FIXME: Perhaps this can go away if CCK rate reporting is a little
	 * reworked?
	 */
	if (!status->freq)
		return;

	band = status->band;
	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate_idx = 0;

		if (rate < 0x08 || rate > 0x0F)
			break;

		switch (band) {
		case IEEE80211_BAND_2GHZ:
			if (cck)
				rate &= ~BIT(3);
			rate_idx = rx_legacy_rate_idx[rate];
			break;
		case IEEE80211_BAND_5GHZ:
			rate_idx = rx_legacy_rate_idx[rate];
			/* We are using the same rate table that was
			   registered with the HW - ath10k_rates[]. For
			   5GHz skip the CCK rates, hence -4 here */
			rate_idx -= 4;
			break;
		default:
			break;
		}

		status->rate_idx = rate_idx;
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		   TODO check this */
		mcs = (info3 >> 4) & 0x0F;
		nss = ((info2 >> 10) & 0x07) + 1;
		bw = info2 & 3;
		sgi = info3 & 1;

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

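/* Decode sketch for the HT branch above (illustrative): an HT-SIG mcs
 * field of 15 gives status->rate_idx = 15, which mac80211 reads as
 * MCS15 (two spatial streams) because RX_FLAG_HT is set. The local
 * nss = mcs >> 3 is computed here but for HT mac80211 derives the
 * stream count from the MCS index itself.
 */
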
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->vht_nss = 0;
		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
		status->flag &= ~(RX_FLAG_HT |
				  RX_FLAG_VHT |
				  RX_FLAG_SHORT_GI |
				  RX_FLAG_40MHZ |
				  RX_FLAG_MACTIME_END);
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu)
		ath10k_htt_rx_h_mactime(ar, status, rxd);
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}

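/* Example: a plain 3-addr data header is 24 bytes and already 4-byte
 * aligned, but a 4-addr header is 30 bytes, so round_up(30, 4) = 32
 * accounts for the 2 padding bytes the FW inserts - hence the
 * "fixes 4addr rx" note above.
 */
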
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This would
	 * also make sense for software based decryption (which is not
	 * implemented in ath10k).
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));

	/* MMIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - 8);

	/* Head */
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	memmove((void *)msdu->data + crypto_len,
		(void *)msdu->data, hdr_len);
	skb_pull(msdu, crypto_len);
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, 4) +
			   round_up(crypto_len, 4);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;
	struct ieee80211_hdr *hdr;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;
	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr,
					    enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
		break;
	}
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

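/* The result above feeds skb->ip_summed via the helper below:
 * CHECKSUM_UNNECESSARY tells the network stack the HW already
 * validated the IP and TCP/UDP checksums, while CHECKSUM_NONE makes
 * the stack verify them in software. E.g. an IPv4 TCP frame whose TCP
 * checksum failed in HW returns CHECKSUM_NONE so the stack can
 * recheck and drop it itself.
 */
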
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	size_t hdr_len;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(first_hdr, hdr, hdr_len);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;
	qos = ieee80211_get_qos_ctl(hdr);
	qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted)
		status->flag |= RX_FLAG_DECRYPTED |
				RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		ath10k_process_rx(ar, status, msdu);
	}
}

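/* Flag walk-through (illustrative): for a 3-subframe A-MSDU the loop
 * above delivers subframes 1 and 2 with RX_FLAG_AMSDU_MORE set and
 * clears it for subframe 3, telling mac80211 where the A-MSDU ends.
 */
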
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * the first skb
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}

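/* Coalescing sketch (illustrative sizes): a chained msdu spanning
 * three rx buffers has its first buffer grown with pskb_expand_head()
 * to hold the combined length; the two remaining buffers are then
 * copied in and freed, leaving one linear skb on the queue for the
 * rest of the rx path.
 */
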
Michal Kazior581c25f2014-11-18 09:24:48 +02001348static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
1349 struct sk_buff_head *amsdu,
1350 bool chained)
Janusz Dziedzic2acc4eb2014-03-19 07:09:40 +01001351{
Michal Kazior581c25f2014-11-18 09:24:48 +02001352 struct sk_buff *first;
1353 struct htt_rx_desc *rxd;
1354 enum rx_msdu_decap_format decap;
Michal Kazior7aa7a722014-08-25 12:09:38 +02001355
Michal Kazior581c25f2014-11-18 09:24:48 +02001356 first = skb_peek(amsdu);
1357 rxd = (void *)first->data - sizeof(*rxd);
1358 decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
1359 RX_MSDU_START_INFO1_DECAP_FORMAT);
1360
1361 if (!chained)
1362 return;
1363
1364 /* FIXME: Current unchaining logic can only handle simple case of raw
1365 * msdu chaining. If decapping is other than raw the chaining may be
1366 * more complex and this isn't handled by the current code. Don't even
1367 * try re-constructing such frames - it'll be pretty much garbage.
1368 */
1369 if (decap != RX_MSDU_DECAP_RAW ||
1370 skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
1371 __skb_queue_purge(amsdu);
1372 return;
1373 }
1374
1375 ath10k_unchain_msdu(amsdu);
1376}
1377
1378static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1379 struct sk_buff_head *amsdu,
1380 struct ieee80211_rx_status *rx_status)
1381{
1382 struct sk_buff *msdu;
1383 struct htt_rx_desc *rxd;
Michal Kaziord67d0a02014-11-24 15:34:08 +01001384 bool is_mgmt;
1385 bool has_fcs_err;
Michal Kazior581c25f2014-11-18 09:24:48 +02001386
1387 msdu = skb_peek(amsdu);
1388 rxd = (void *)msdu->data - sizeof(*rxd);
1389
1390 /* FIXME: It might be a good idea to do some fuzzy-testing to drop
1391 * invalid/dangerous frames.
1392 */
1393
1394 if (!rx_status->freq) {
1395 ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
Janusz Dziedzic2acc4eb2014-03-19 07:09:40 +01001396 return false;
1397 }
1398
Michal Kaziord67d0a02014-11-24 15:34:08 +01001399 is_mgmt = !!(rxd->attention.flags &
1400 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
1401 has_fcs_err = !!(rxd->attention.flags &
1402 __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
1403
Michal Kazior581c25f2014-11-18 09:24:48 +02001404 /* Management frames are handled via WMI events. The pros of such
1405 * approach is that channel is explicitly provided in WMI events
1406 * whereas HTT doesn't provide channel information for Rxed frames.
Michal Kaziord67d0a02014-11-24 15:34:08 +01001407 *
1408 * However some firmware revisions don't report corrupted frames via
1409 * WMI so don't drop them.
Michal Kazior581c25f2014-11-18 09:24:48 +02001410 */
Michal Kaziord67d0a02014-11-24 15:34:08 +01001411 if (is_mgmt && !has_fcs_err) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001412 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
Janusz Dziedzic2acc4eb2014-03-19 07:09:40 +01001413 return false;
1414 }
1415
Michal Kazior581c25f2014-11-18 09:24:48 +02001416 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1417 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
Janusz Dziedzic2acc4eb2014-03-19 07:09:40 +01001418 return false;
1419 }
1420
1421 return true;
1422}
1423
Michal Kazior581c25f2014-11-18 09:24:48 +02001424static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1425 struct sk_buff_head *amsdu,
1426 struct ieee80211_rx_status *rx_status)
1427{
1428 if (skb_queue_empty(amsdu))
1429 return;
1430
1431 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
1432 return;
1433
1434 __skb_queue_purge(amsdu);
1435}
1436
Kalle Valo5e3dd152013-06-12 20:52:10 +03001437static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
1438 struct htt_rx_indication *rx)
1439{
Michal Kazior7aa7a722014-08-25 12:09:38 +02001440 struct ath10k *ar = htt->ar;
Janusz Dziedzic6df92a32014-03-24 21:24:57 +01001441 struct ieee80211_rx_status *rx_status = &htt->rx_status;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001442 struct htt_rx_indication_mpdu_range *mpdu_ranges;
Michal Kazior9aa505d2014-11-18 09:24:47 +02001443 struct sk_buff_head amsdu;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001444 int num_mpdu_ranges;
1445 int fw_desc_len;
1446 u8 *fw_desc;
Michal Kaziord5406902014-11-18 09:24:47 +02001447 int i, ret, mpdu_count = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001448
Michal Kazior45967082014-02-27 18:50:05 +02001449 lockdep_assert_held(&htt->rx_ring.lock);
1450
Michal Kaziore0bd7512014-11-18 09:24:48 +02001451 if (htt->rx_confused)
1452 return;
1453
Kalle Valo5e3dd152013-06-12 20:52:10 +03001454 fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
1455 fw_desc = (u8 *)&rx->fw_desc;
1456
1457 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
1458 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
1459 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
1460
Michal Kazior7aa7a722014-08-25 12:09:38 +02001461 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
Kalle Valo5e3dd152013-06-12 20:52:10 +03001462 rx, sizeof(*rx) +
1463 (sizeof(struct htt_rx_indication_mpdu_range) *
1464 num_mpdu_ranges));
1465
Michal Kaziord5406902014-11-18 09:24:47 +02001466 for (i = 0; i < num_mpdu_ranges; i++)
1467 mpdu_count += mpdu_ranges[i].mpdu_count;
Janusz Dziedzicd84dd602014-03-24 21:23:20 +01001468
Michal Kaziord5406902014-11-18 09:24:47 +02001469 while (mpdu_count--) {
Michal Kaziord5406902014-11-18 09:24:47 +02001470 __skb_queue_head_init(&amsdu);
1471 ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
Michal Kaziorf0e27702014-11-18 09:24:49 +02001472 &fw_desc_len, &amsdu);
Michal Kaziord5406902014-11-18 09:24:47 +02001473 if (ret < 0) {
Michal Kaziore0bd7512014-11-18 09:24:48 +02001474 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
Michal Kaziord5406902014-11-18 09:24:47 +02001475 __skb_queue_purge(&amsdu);
Michal Kaziore0bd7512014-11-18 09:24:48 +02001476 /* FIXME: It's probably a good idea to reboot the
1477 * device instead of leaving it inoperable.
1478 */
1479 htt->rx_confused = true;
1480 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001481 }
Michal Kaziord5406902014-11-18 09:24:47 +02001482
		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

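/* Handle an rx fragment indication: a fragmented MPDU is expected to arrive
 * as a single-MSDU A-MSDU and is run through the same rx stages, minus the
 * un-chaining step.
 */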
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	u8 *fw_desc;
	int fw_desc_len;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
				      &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	tasklet_schedule(&htt->rx_replenish_task);

	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (ret) {
		ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
			    ret);
		__skb_queue_purge(&amsdu);
		return;
	}

	if (skb_queue_len(&amsdu) != 1) {
		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
		__skb_queue_purge(&amsdu);
		return;
	}

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

	if (fw_desc_len > 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

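/* Translate a data tx completion status into a htt_tx_done report and release
 * the completed MSDUs. Requires tx_lock to be held.
 */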
static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	lockdep_assert_held(&htt->tx_lock);

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.no_ack = true;
		break;
	case HTT_DATA_TX_STATUS_OK:
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.discard = true;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.discard = true;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);
		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

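/* Firmware has started a rx BA session for the given peer/tid; inform
 * mac80211 so it can set up the offloaded session.
 */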
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

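/* Firmware has torn down a rx BA session for the given peer/tid; inform
 * mac80211 so it can stop the offloaded session.
 */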
static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

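/* Top-level dispatch for HTT target-to-host messages. Rx and data tx
 * completion indications are queued and handled asynchronously in the txrx
 * tasklet; everything else is handled (and freed) inline.
 */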
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
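	/* Rx indications are deferred to the txrx tasklet; the skb is
	 * consumed (and freed) there, hence the early return instead of
	 * falling through to the free below.
	 */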
	case HTT_T2H_MSG_TYPE_RX_IND:
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		spin_lock_bh(&htt->tx_lock);
		ath10k_txrx_tx_unref(htt, &tx_done);
		spin_unlock_bh(&htt->tx_lock);
		break;
	}
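	/* Likewise, data tx completions are batched on tx_compl_q and handled
	 * in the txrx tasklet, which also frees the skb.
	 */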
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		spin_lock_bh(&htt->tx_lock);
		__skb_queue_tail(&htt->tx_compl_q, skb);
		spin_unlock_bh(&htt->tx_lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		struct ath10k_pktlog_hdr *hdr =
			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;

		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					sizeof(*hdr) +
					__le16_to_cpu(hdr->size));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}

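/* Tasklet draining the deferred tx completion and rx indication queues filled
 * by ath10k_htt_t2h_msg_handler().
 */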
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct htt_resp *resp;
	struct sk_buff *skb;

	spin_lock_bh(&htt->tx_lock);
	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->tx_lock);

	spin_lock_bh(&htt->rx_ring.lock);
	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
		resp = (struct htt_resp *)skb->data;
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}