/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}

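/* Worked example (illustrative, not from the original source): with
 * max_throughput_mbps = 1000, the formula above gives
 * 1000 * 1000 / (8 * 1000) * 20 = 2500 buffers, which is clamped to
 * HTT_RX_RING_SIZE_MAX (2048) - already a power of two. */
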
static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}

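/* Worked example (illustrative): with max_throughput_mbps = 1000, the fill
 * level computes to 1000 * 1000 / (8 * 1000) * 10 = 1250 buffers, i.e. a
 * 2048-entry ring would initially be filled a bit over half way. */
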
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
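	/* alloc_idx.vaddr lives in DMA-coherent memory visible to the
	 * target; it records how far the host has filled the ring and is
	 * written back at the fail label below. */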
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up a significant amount of CPU cycles and
	 * starve other tasks, e.g. TX on an ethernet device while acting as
	 * a bridge with the ath10k wlan interface. This ended up with very
	 * poor performance once the host CPU was overwhelmed with RX on
	 * ath10k.
	 *
	 * By limiting the number of buffers per refill the replenishing
	 * occurs progressively. This in turn makes use of the fact that
	 * tasklets are processed in FIFO order. This means actual RX
	 * processing can starve out refilling. If there aren't enough
	 * buffers on the RX ring the FW will not report RX until the ring
	 * is refilled with enough buffers. This automatically balances the
	 * load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}

void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->tx_compl_q);
	skb_queue_purge(&htt->rx_compl_q);

	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
		struct sk_buff *skb = htt->rx_ring.netbufs_ring[sw_rd_idx];
		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		sw_rd_idx++;
		sw_rd_idx &= htt->rx_ring.size_mask;
	}

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	return msdu;
}

static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}

static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused) {
		ath10k_warn("htt is confused. refusing rx\n");
		return 0;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err("htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that the frames will still be
			 * delivered to the upper stack if there is no CRC
			 * error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;
		msdu_chaining = msdu_chained;
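		/* A non-zero ring2_more_count means this MSDU spills over
		 * into additional ring buffers; the non-zero return value
		 * tells the caller the chain must be linearized (see
		 * ath10k_unchain_msdu()) before further processing. */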

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
					"htt rx chained: ", next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		} else {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
			msdu->next = next;
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
	dma_addr_t paddr;
	void *vaddr;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn("htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   (htt->rx_ring.size *
				    sizeof(htt->rx_ring.paddrs_ring)),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	skb_queue_head_init(&htt->tx_compl_q);
	skb_queue_head_init(&htt->rx_compl_q);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

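/* For reference (illustrative): param_len is the per-MPDU crypto header
 * (IV) preceding the payload and tail_len is the trailer - e.g. CCMP has
 * an 8-byte header and an 8-byte MIC, while TKIP has a 4-byte ICV plus an
 * 8-byte MIC on the last fragment (handled in the frag path below). */
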
/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;
	else
		return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		if (qc[0] & 0x80)
			return true;
	}
	return false;
}

struct rfc1042_hdr {
	u8 llc_dsap;
	u8 llc_ssap;
	u8 llc_ctrl;
	u8 snap_oui[3];
	__be16 snap_type;
} __packed;

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}

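/* Example (illustrative): a 3-address QoS data header is 26 bytes and
 * rounds up to 28, while a 4-address QoS data header is already 32 bytes
 * and stays unchanged. */
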
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
				struct htt_rx_info *info)
{
	struct htt_rx_desc *rxd;
	struct sk_buff *first;
	struct sk_buff *skb = info->skb;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct ieee80211_hdr *hdr;
	u8 hdr_buf[64], addr[ETH_ALEN], *qos;
	unsigned int hdr_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(hdr_buf, hdr, hdr_len);
	hdr = (struct ieee80211_hdr *)hdr_buf;

	first = skb;
	while (skb) {
		void *decap_hdr;
		int len;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			 RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

		/* First frame in an A-MSDU chain has more decapped data. */
		if (skb == first) {
			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
			len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
					4);
			decap_hdr += len;
		}

		switch (fmt) {
		case RX_MSDU_DECAP_RAW:
			/* remove trailing FCS */
			skb_trim(skb, skb->len - FCS_LEN);
			break;
		case RX_MSDU_DECAP_NATIVE_WIFI:
			/* pull decapped header and copy DA */
			hdr = (struct ieee80211_hdr *)skb->data;
			hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
			memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
			skb_pull(skb, hdr_len);

			/* push original 802.11 header */
			hdr = (struct ieee80211_hdr *)hdr_buf;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);

			/* the original A-MSDU header has the A-MSDU-present
			 * bit set but we're not including an A-MSDU subframe
			 * header */
			hdr = (struct ieee80211_hdr *)skb->data;
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

			/* original 802.11 header has a different DA */
			memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
			break;
		case RX_MSDU_DECAP_ETHERNET2_DIX:
			/* strip ethernet header and insert decapped 802.11
			 * header, amsdu subframe header and rfc1042 header */

			len = 0;
			len += sizeof(struct rfc1042_hdr);
			len += sizeof(struct amsdu_subframe_hdr);

			skb_pull(skb, sizeof(struct ethhdr));
			memcpy(skb_push(skb, len), decap_hdr, len);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		case RX_MSDU_DECAP_8023_SNAP_LLC:
			/* insert decapped 802.11 header making a single
			 * A-MSDU */
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		}

		info->skb = skb;
		info->encrypt_type = enctype;
		skb = skb->next;
		info->skb->next = NULL;

		if (skb)
			info->amsdu_more = true;

		ath10k_process_rx(htt->ar, info);
	}

	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
	 * monitor interface active for sniffing purposes. */
}

static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
	struct sk_buff *skb = info->skb;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	int hdr_len;
	void *rfc1042;

	/* This shouldn't happen. If it does then it may be a FW bug. */
	if (skb->next) {
		ath10k_warn("htt rx received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - FCS_LEN);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* Pull decapped header */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
		skb_pull(skb, hdr_len);

		/* Push original header */
		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* strip ethernet header and insert decapped 802.11 header and
		 * rfc1042 header */

		rfc1042 = hdr;
		rfc1042 += roundup(hdr_len, 4);
		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

		skb_pull(skb, sizeof(struct ethhdr));
		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
		       rfc1042, sizeof(struct rfc1042_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* remove A-MSDU subframe header and insert
		 * decapped 802.11 header. rfc1042 header is already there */

		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	}

	info->skb = skb;
	info->encrypt_type = enctype;

	ath10k_process_rx(htt->ar, info);
}

static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
		return true;

	return false;
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

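/* The result is stored in skb->ip_summed by the callers above:
 * CHECKSUM_UNNECESSARY tells the network stack the HW already verified the
 * IP/TCP/UDP checksums, while CHECKSUM_NONE makes the stack verify them in
 * software. */
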
static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
{
	struct sk_buff *next = msdu_head->next;
	struct sk_buff *to_free = next;
	int space;
	int total_len = 0;

	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or a similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skbs?
	 */

	msdu_head->next = NULL;

	/* Allocate total length all at once. */
	while (next) {
		total_len += next->len;
		next = next->next;
	}

	space = total_len - skb_tailroom(msdu_head);
	if ((space > 0) &&
	    (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		msdu_head->next = to_free;
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	next = to_free;
	while (next) {
		skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
					  next->len);
		next = next->next;
	}

	/* If here, we have consolidated skb. Free the
	 * fragments and pass the main skb on up the
	 * stack.
	 */
	ath10k_htt_rx_free_msdu_chain(to_free);
	return 0;
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
					struct sk_buff *head,
					struct htt_rx_info *info)
{
	enum htt_rx_mpdu_status status = info->status;

	if (!head) {
		ath10k_warn("htt rx no data!\n");
		return false;
	}

	if (head->len == 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "htt rx dropping due to zero-len\n");
		return false;
	}

	if (ath10k_htt_rx_has_decrypt_err(head)) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "htt rx dropping due to decrypt-err\n");
		return false;
	}

	/* Skip mgmt frames while we handle this in WMI */
	if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
	    ath10k_htt_rx_is_mgmt(head)) {
		ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
		return false;
	}

	if (status != HTT_RX_IND_MPDU_STATUS_OK &&
	    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
	    status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
	    !htt->ar->monitor_enabled) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "htt rx ignoring frame w/ status %d\n",
			   status);
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "htt rx CAC running\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct htt_rx_info info;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, j;

	lockdep_assert_held(&htt->rx_ring.lock);

	memset(&info, 0, sizeof(info));

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
			 num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		info.status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;
			int msdu_chaining;

			msdu_head = NULL;
			msdu_tail = NULL;
			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
								&fw_desc,
								&fw_desc_len,
								&msdu_head,
								&msdu_tail);

			if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
							 &info)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (msdu_chaining &&
			    (ath10k_unchain_msdu(msdu_head) < 0)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			info.skb = msdu_head;
			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
			info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);

			if (info.fcs_err)
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx has FCS err\n");

			if (info.mic_err)
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx has MIC err\n");

			info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
			info.signal += rx->ppdu.combined_rssi;

			info.rate.info0 = rx->ppdu.info0;
			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
			info.tsf = __le32_to_cpu(rx->ppdu.tsf);

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ath10k_htt_rx_amsdu(htt, &info);
			else
				ath10k_htt_rx_msdu(htt, &info);
		}
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct sk_buff *msdu_head, *msdu_tail;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct htt_rx_info info = {};
	struct ieee80211_hdr *hdr;
	int msdu_chaining;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;

	spin_lock_bh(&htt->rx_ring.lock);
	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
						&msdu_head, &msdu_tail);
	spin_unlock_bh(&htt->rx_ring.lock);

	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (!msdu_head) {
		ath10k_warn("htt rx frag no data\n");
		return;
	}

	if (msdu_chaining || msdu_head != msdu_tail) {
		ath10k_warn("aggregation with fragmentation?!\n");
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
			  RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
			 RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn("we don't support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	info.skb = msdu_head;
	info.status = HTT_RX_IND_MPDU_STATUS_OK;
	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			       RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);

	if (tkip_mic_err) {
		ath10k_warn("tkip mic error\n");
		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
	}

	if (decrypt_err) {
		ath10k_warn("decryption err in fragmented rx\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

		/* It is more efficient to move the header than the payload */
		memmove((void *)info.skb->data + paramlen,
			(void *)info.skb->data,
			hdrlen);
		skb_pull(info.skb, paramlen);
		hdr = (struct ieee80211_hdr *)info.skb->data;
	}

	/* remove trailing FCS */
	trim = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > info.skb->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	skb_trim(info.skb, info.skb->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
			info.skb->data, info.skb->len);
	ath10k_process_rx(htt->ar, &info);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	lockdep_assert_held(&htt->tx_lock);

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.no_ack = true;
		break;
	case HTT_DATA_TX_STATUS_OK:
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.discard = true;
		break;
	default:
		ath10k_warn("unhandled tx completion status %d\n", status);
		tx_done.discard = true;
		break;
	}

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);
		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("unaligned htt message, expect trouble\n");

	ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		spin_lock_bh(&htt->tx_lock);
		ath10k_txrx_tx_unref(htt, &tx_done);
		spin_unlock_bh(&htt->tx_lock);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		spin_lock_bh(&htt->tx_lock);
		__skb_queue_tail(&htt->tx_compl_q, skb);
		spin_unlock_bh(&htt->tx_lock);
		tasklet_schedule(&htt->txrx_compl_task);
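		/* likewise, the skb is now owned by tx_compl_q */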
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	case HTT_T2H_MSG_TYPE_RX_FLUSH:
	default:
		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}

static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct htt_resp *resp;
	struct sk_buff *skb;

	spin_lock_bh(&htt->tx_lock);
	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->tx_lock);

	spin_lock_bh(&htt->rx_ring.lock);
	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
		resp = (struct htt_resp *)skb->data;
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}