blob: 4fb2cbc2e48ef2f188b6cd9307fae87f74d7bd5a [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
2 * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/*-
29 * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting
30 * All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
42 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
43 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
44 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
45 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
46 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
50 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 */
52#include <ol_htt_api.h>
53#include <ol_txrx_api.h>
54#include <ol_txrx_htt_api.h>
55#include <ol_htt_rx_api.h>
56#include <ol_txrx_types.h>
57#include <ol_rx_reorder.h>
58#include <ol_rx_pn.h>
59#include <ol_rx_fwd.h>
60#include <ol_rx.h>
61#include <ol_txrx_internal.h>
62#include <ol_ctrl_txrx_api.h>
63#include <ol_txrx_peer_find.h>
64#include <cdf_nbuf.h>
65#include <ieee80211.h>
66#include <cdf_util.h>
67#include <athdefs.h>
68#include <cdf_memory.h>
69#include <ol_rx_defrag.h>
70#include <enet.h>
71#include <cdf_time.h> /* cdf_system_time */
72
/* Compare two 802.11 MAC addresses for equality */
#define DEFRAG_IEEE80211_ADDR_EQ(a1, a2) \
	(cdf_mem_compare(a1, a2, IEEE80211_ADDR_LEN) == 0)

/* Copy an 802.11 MAC address */
#define DEFRAG_IEEE80211_ADDR_COPY(dst, src) \
	cdf_mem_copy(dst, src, IEEE80211_ADDR_LEN)

/* True if the frame is a QoS data frame, i.e. carries a QoS control field */
#define DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \
	(((wh)->i_fc[0] & \
	  (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
	 (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))

/* Extract the TID from a QoS data frame's QoS control field */
#define DEFRAG_IEEE80211_QOS_GET_TID(_x) \
	((_x)->i_qos[0] & IEEE80211_QOS_TID)
86
/*
 * Per-cipher description of the security encapsulation around each
 * fragment: bytes of IV/key-ID header preceding the payload, bytes of
 * trailer (ICV/MIC) following it, and bytes of Michael MIC handled
 * separately.
 * NOTE(review): field meanings inferred from how the decap/demic helpers
 * below consume ic_header/ic_trailer/ic_miclen -- confirm against the
 * struct ol_rx_defrag_cipher declaration in ol_rx_defrag.h.
 */
const struct ol_rx_defrag_cipher f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

const struct ol_rx_defrag_cipher f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

const struct ol_rx_defrag_cipher f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};
107
108inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
109 htt_pdev_handle htt_pdev,
110 cdf_nbuf_t frag)
111{
112 return
113 (struct ieee80211_frame *) cdf_nbuf_data(frag);
114}
/* Strip hdrsize bytes of 802.11 header from the front of a fragment.
 * Note: no trailing semicolon in the expansion -- the original macro
 * ended in ';', which yielded a stray empty statement at every call
 * site and would break use in an unbraced if/else. */
#define ol_rx_frag_pull_hdr(pdev, frag, hdrsize) \
	cdf_nbuf_pull_head(frag, hdrsize)
/* Fragment cloning is not needed in this configuration. */
#define OL_RX_FRAG_CLONE(frag) NULL     /* no-op */
118
119static inline void
120ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
121 cdf_nbuf_t msdu,
122 void **rx_desc_old_position,
123 void **ind_old_position, int *rx_desc_len)
124{
125 *rx_desc_old_position = NULL;
126 *ind_old_position = NULL;
127 *rx_desc_len = 0;
128}
129
/**
 * ol_rx_frag_indication_handler() - process an rx fragment indication
 * @pdev: txrx physical device that received the fragment
 * @rx_frag_ind_msg: HTT rx fragment indication message
 * @peer_id: ID of the peer the fragment came from
 * @tid: traffic type (TID) of the fragment
 *
 * Pops the single MSDU carried by the indication, retrieves its rx MPDU
 * descriptor and sequence number, and hands the fragment to the reorder /
 * defrag logic.  Fragments from unknown peers are discarded.  The rx ring
 * is replenished before returning on every path.
 */
void
ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
			      cdf_nbuf_t rx_frag_ind_msg,
			      uint16_t peer_id, uint8_t tid)
{
	uint16_t seq_num;
	int seq_num_start, seq_num_end;
	struct ol_txrx_peer_t *peer;
	htt_pdev_handle htt_pdev;
	cdf_nbuf_t head_msdu, tail_msdu;
	void *rx_mpdu_desc;

	htt_pdev = pdev->htt_pdev;
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev) &&
	    htt_rx_ind_flush(pdev->htt_pdev, rx_frag_ind_msg) && peer) {
		htt_rx_frag_ind_flush_seq_num_range(pdev->htt_pdev,
						    rx_frag_ind_msg,
						    &seq_num_start,
						    &seq_num_end);
		/*
		 * Assuming flush indication for frags sent from target is
		 * separate from normal frames
		 */
		ol_rx_reorder_flush_frag(htt_pdev, peer, tid, seq_num_start);
	}
	if (peer) {
		/* a fragment indication carries exactly one MSDU */
		htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
				&tail_msdu);
		cdf_assert(head_msdu == tail_msdu);
		/* the MPDU descriptor lives with the buffer (full reorder
		 * offload) or with the indication message (otherwise) */
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev, head_msdu);
		} else {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev,
							   rx_frag_ind_msg);
		}
		seq_num = htt_rx_mpdu_desc_seq_num(htt_pdev, rx_mpdu_desc);
		OL_RX_ERR_STATISTICS_1(pdev, peer->vdev, peer, rx_mpdu_desc,
				       OL_RX_ERR_NONE_FRAG);
		ol_rx_reorder_store_frag(pdev, peer, tid, seq_num, head_msdu);
	} else {
		/* invalid frame - discard it */
		htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
				&tail_msdu);
		/* still consume the descriptor so the HTT state advances */
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu);
		else
			htt_rx_mpdu_desc_list_next(htt_pdev, rx_frag_ind_msg);

		htt_rx_desc_frame_free(htt_pdev, head_msdu);
	}
	/* request HTT to provide new rx MSDU buffers for the target to fill. */
	htt_rx_msdu_buff_replenish(htt_pdev);
}
190
191/*
192 * Flushing fragments
193 */
194void
195ol_rx_reorder_flush_frag(htt_pdev_handle htt_pdev,
196 struct ol_txrx_peer_t *peer, unsigned tid, int seq_num)
197{
198 struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
199 int seq;
200
201 seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
202 rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];
203 if (rx_reorder_array_elem->head) {
204 ol_rx_frames_free(htt_pdev, rx_reorder_array_elem->head);
205 rx_reorder_array_elem->head = NULL;
206 rx_reorder_array_elem->tail = NULL;
207 }
208}
209
/**
 * ol_rx_reorder_store_frag() - store one rx fragment for later reassembly
 * @pdev: txrx physical device
 * @peer: peer that sent the fragment
 * @tid: traffic type of the fragment
 * @seq_num: sequence number of the fragment (host order)
 * @frag: the fragment buffer
 *
 * An unfragmented frame with nothing pending is defragmented immediately.
 * Otherwise the fragment is inserted into the fraglist kept at reorder
 * array index 0; once all fragments are present the completed list is
 * passed to ol_rx_defrag().  A pending list belonging to a different
 * sequence number or address pair is dropped before the new fragment is
 * stored.
 */
void
ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
			 struct ol_txrx_peer_t *peer,
			 unsigned tid, uint16_t seq_num, cdf_nbuf_t frag)
{
	struct ieee80211_frame *fmac_hdr, *mac_hdr;
	uint8_t fragno, more_frag, all_frag_present = 0;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	uint16_t frxseq, rxseq, seq;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	/* fragments are always stored at reorder-array index 0 */
	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	cdf_assert(seq == 0);
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	/* sequence / fragment number from the sequence-control field */
	rxseq = cdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) >>
		IEEE80211_SEQ_SEQ_SHIFT;
	fragno = cdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	more_frag = mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;

	/* complete frame and no partial list pending: deliver directly */
	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		rx_reorder_array_elem->head = frag;
		rx_reorder_array_elem->tail = frag;
		cdf_nbuf_set_next(frag, NULL);
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		return;
	}
	if (rx_reorder_array_elem->head) {
		/* the pending fraglist must match the new fragment's
		 * sequence number and addresses; otherwise it is stale */
		fmac_hdr = (struct ieee80211_frame *)
			ol_rx_frag_get_mac_hdr(htt_pdev,
					       rx_reorder_array_elem->head);
		frxseq = cdf_le16_to_cpu(*(uint16_t *) fmac_hdr->i_seq) >>
			IEEE80211_SEQ_SEQ_SHIFT;
		if (rxseq != frxseq
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr1,
						 fmac_hdr->i_addr1)
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr2,
						 fmac_hdr->i_addr2)) {
			ol_rx_frames_free(htt_pdev,
					  rx_reorder_array_elem->head);
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				   "\n ol_rx_reorder_store: %s mismatch \n",
				   (rxseq == frxseq)
				   ? "address"
				   : "seq number");
		}
	}

	ol_rx_fraglist_insert(htt_pdev, &rx_reorder_array_elem->head,
			      &rx_reorder_array_elem->tail, frag,
			      &all_frag_present);

	if (pdev->rx.flags.defrag_timeout_check)
		ol_rx_defrag_waitlist_remove(peer, tid);

	if (all_frag_present) {
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		peer->tids_rx_reorder[tid].defrag_timeout_ms = 0;
		peer->tids_last_seq[tid] = seq_num;
	} else if (pdev->rx.flags.defrag_timeout_check) {
		/* arm the defrag timeout for the partially-assembled frame */
		uint32_t now_ms = cdf_system_ticks_to_msecs(cdf_system_ticks());

		peer->tids_rx_reorder[tid].defrag_timeout_ms =
			now_ms + pdev->rx.defrag.timeout_ms;
		ol_rx_defrag_waitlist_add(peer, tid);
	}
}
289
/**
 * ol_rx_fraglist_insert() - insert a fragment into a sorted fraglist
 * @htt_pdev: HTT physical device handle
 * @head_addr: in/out - head of the fraglist (sorted by fragment number)
 * @tail_addr: in/out - tail of the fraglist
 * @frag: the fragment to insert
 * @all_frag_present: out - set to 1 when the list now holds a complete,
 *	gap-free sequence ending in a fragment without the more-frag bit
 *
 * Duplicate fragments (same fragment number as one already stored) are
 * freed and ignored.  Completeness is detected by walking the list and
 * checking that fragment numbers run 0,1,2,... up to a final fragment
 * whose more-fragments bit is clear.
 */
void
ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
		      cdf_nbuf_t *head_addr,
		      cdf_nbuf_t *tail_addr,
		      cdf_nbuf_t frag, uint8_t *all_frag_present)
{
	cdf_nbuf_t next, prev = NULL, cur = *head_addr;
	struct ieee80211_frame *mac_hdr, *cmac_hdr, *next_hdr, *lmac_hdr;
	uint8_t fragno, cur_fragno, lfragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;
	cdf_nbuf_t frag_clone;

	cdf_assert(frag);
	/* OL_RX_FRAG_CLONE is a no-op (NULL) in this configuration */
	frag_clone = OL_RX_FRAG_CLONE(frag);
	frag = frag_clone ? frag_clone : frag;

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	fragno = cdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;

	/* empty list: the new fragment becomes head and tail */
	if (!(*head_addr)) {
		*head_addr = frag;
		*tail_addr = frag;
		cdf_nbuf_set_next(*tail_addr, NULL);
		return;
	}
	/* For efficiency, compare with tail first */
	lmac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, *tail_addr);
	lfragno = cdf_le16_to_cpu(*(uint16_t *) lmac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	if (fragno > lfragno) {
		/* append past the current tail */
		cdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		cdf_nbuf_set_next(*tail_addr, NULL);
	} else {
		/* walk forward to the first entry with fragno >= new fragno;
		 * fragno <= lfragno guarantees the loop stops before the
		 * end of the list, so 'cur' stays valid here */
		do {
			cmac_hdr = (struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, cur);
			cur_fragno =
				cdf_le16_to_cpu(*(uint16_t *) cmac_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			prev = cur;
			cur = cdf_nbuf_next(cur);
		} while (fragno > cur_fragno);

		if (fragno == cur_fragno) {
			/* duplicate fragment: drop the new copy */
			htt_rx_desc_frame_free(htt_pdev, frag);
			*all_frag_present = 0;
			return;
		} else {
			/* splice the fragment in between prev and cur */
			cdf_nbuf_set_next(prev, frag);
			cdf_nbuf_set_next(frag, cur);
		}
	}
	/* completeness check: only possible once the last fragment
	 * (more-frag bit clear) is at the tail */
	next = cdf_nbuf_next(*head_addr);
	lmac_hdr = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
								    *tail_addr);
	last_morefrag = lmac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
	if (!last_morefrag) {
		/* verify fragment numbers run consecutively 1,2,3,... */
		do {
			next_hdr =
				(struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, next);
			next_fragno =
				cdf_le16_to_cpu(*(uint16_t *) next_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			count++;
			if (next_fragno != count)
				break;

			next = cdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return;
		}
	}
	*all_frag_present = 0;
}
375
/**
 * ol_rx_defrag_waitlist_add() - queue a peer/TID for defrag timeout checks
 * @peer: peer holding the partially-assembled fragments
 * @tid: traffic type with pending fragments
 *
 * Appends the peer's per-TID reorder state to the pdev defrag waitlist.
 * NOTE(review): inserting an element that is already on the list would
 * corrupt the TAILQ; callers (ol_rx_reorder_store_frag) remove the entry
 * before re-adding it -- keep that invariant.
 */
void ol_rx_defrag_waitlist_add(struct ol_txrx_peer_t *peer, unsigned tid)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];

	TAILQ_INSERT_TAIL(&pdev->rx.defrag.waitlist, rx_reorder,
			  defrag_waitlist_elem);
}
387
/**
 * ol_rx_defrag_waitlist_remove() - dequeue a peer/TID from the defrag waitlist
 * @peer: peer whose entry should be removed
 * @tid: traffic type whose entry should be removed
 *
 * Safe to call when the entry is not on the list: off-list entries are
 * recognized by both TAILQ link fields being NULL, which this function
 * establishes after removal.
 * NOTE(review): this assumes the reorder structs start out zeroed at peer
 * init -- confirm at the allocation site.
 */
void ol_rx_defrag_waitlist_remove(struct ol_txrx_peer_t *peer, unsigned tid)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];

	if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL ||
	    rx_reorder->defrag_waitlist_elem.tqe_prev != NULL) {

		TAILQ_REMOVE(&pdev->rx.defrag.waitlist, rx_reorder,
			     defrag_waitlist_elem);

		/* mark the entry as off-list for the check above */
		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
		rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
	}
}
406
#ifndef container_of
/* Fallback container_of: recover a pointer to an enclosing structure from
 * a pointer to one of its members (member offset computed via a null
 * pointer, as in the classic offsetof idiom). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
#endif
411
/**
 * ol_rx_defrag_waitlist_flush() - drop fragments whose defrag timer expired
 * @pdev: txrx physical device whose waitlist is scanned
 *
 * Walks the defrag waitlist and frees the pending fraglist of every entry
 * whose timeout has passed.  Entries are queued in arming order, so their
 * timeouts are non-decreasing and the walk can stop at the first entry
 * that has not yet expired.
 */
void ol_rx_defrag_waitlist_flush(struct ol_txrx_pdev_t *pdev)
{
	struct ol_rx_reorder_t *rx_reorder, *tmp;
	uint32_t now_ms = cdf_system_ticks_to_msecs(cdf_system_ticks());

	TAILQ_FOREACH_SAFE(rx_reorder, &pdev->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		struct ol_txrx_peer_t *peer;
		struct ol_rx_reorder_t *rx_reorder_base;
		unsigned tid;

		if (rx_reorder->defrag_timeout_ms > now_ms)
			break;

		tid = rx_reorder->tid;
		/* get index 0 of the rx_reorder array, then recover the
		 * owning peer from the embedded array base */
		rx_reorder_base = rx_reorder - tid;
		peer =
			container_of(rx_reorder_base, struct ol_txrx_peer_t,
				     tids_rx_reorder[0]);

		ol_rx_defrag_waitlist_remove(peer, tid);
		ol_rx_reorder_flush_frag(pdev->htt_pdev, peer, tid,
					 0 /* frags always stored at seq 0 */);
	}
}
441
442/*
443 * Handling security checking and processing fragments
444 */
445void
446ol_rx_defrag(ol_txrx_pdev_handle pdev,
447 struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t frag_list)
448{
449 struct ol_txrx_vdev_t *vdev = NULL;
450 cdf_nbuf_t tmp_next, msdu, prev = NULL, cur = frag_list;
451 uint8_t index, tkip_demic = 0;
452 uint16_t hdr_space;
453 void *rx_desc;
454 struct ieee80211_frame *wh;
455 uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
456
457 htt_pdev_handle htt_pdev = pdev->htt_pdev;
458 vdev = peer->vdev;
459
460 /* bypass defrag for safe mode */
461 if (vdev->safemode) {
462 if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
463 ol_rx_in_order_deliver(vdev, peer, tid, frag_list);
464 else
465 ol_rx_deliver(vdev, peer, tid, frag_list);
466 return;
467 }
468
469 while (cur) {
470 tmp_next = cdf_nbuf_next(cur);
471 cdf_nbuf_set_next(cur, NULL);
472 if (!ol_rx_pn_check_base(vdev, peer, tid, cur)) {
473 /* PN check failed,discard frags */
474 if (prev) {
475 cdf_nbuf_set_next(prev, NULL);
476 ol_rx_frames_free(htt_pdev, frag_list);
477 }
478 ol_rx_frames_free(htt_pdev, tmp_next);
479 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
480 "ol_rx_defrag: PN Check failed\n");
481 return;
482 }
483 /* remove FCS from each fragment */
484 cdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
485 prev = cur;
486 cdf_nbuf_set_next(cur, tmp_next);
487 cur = tmp_next;
488 }
489 cur = frag_list;
490 wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, cur);
491 hdr_space = ol_rx_frag_hdrsize(wh);
492 rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag_list);
493 cdf_assert(htt_rx_msdu_has_wlan_mcast_flag(htt_pdev, rx_desc));
494 index = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc) ?
495 txrx_sec_mcast : txrx_sec_ucast;
496
497 switch (peer->security[index].sec_type) {
498 case htt_sec_type_tkip:
499 tkip_demic = 1;
500 /* fall-through to rest of tkip ops */
501 case htt_sec_type_tkip_nomic:
502 while (cur) {
503 tmp_next = cdf_nbuf_next(cur);
504 if (!ol_rx_frag_tkip_decap(pdev, cur, hdr_space)) {
505 /* TKIP decap failed, discard frags */
506 ol_rx_frames_free(htt_pdev, frag_list);
507 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
508 "\n ol_rx_defrag: TKIP decap failed\n");
509 return;
510 }
511 cur = tmp_next;
512 }
513 break;
514
515 case htt_sec_type_aes_ccmp:
516 while (cur) {
517 tmp_next = cdf_nbuf_next(cur);
518 if (!ol_rx_frag_ccmp_demic(pdev, cur, hdr_space)) {
519 /* CCMP demic failed, discard frags */
520 ol_rx_frames_free(htt_pdev, frag_list);
521 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
522 "\n ol_rx_defrag: CCMP demic failed\n");
523 return;
524 }
525 if (!ol_rx_frag_ccmp_decap(pdev, cur, hdr_space)) {
526 /* CCMP decap failed, discard frags */
527 ol_rx_frames_free(htt_pdev, frag_list);
528 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
529 "\n ol_rx_defrag: CCMP decap failed\n");
530 return;
531 }
532 cur = tmp_next;
533 }
534 break;
535
536 case htt_sec_type_wep40:
537 case htt_sec_type_wep104:
538 case htt_sec_type_wep128:
539 while (cur) {
540 tmp_next = cdf_nbuf_next(cur);
541 if (!ol_rx_frag_wep_decap(pdev, cur, hdr_space)) {
542 /* wep decap failed, discard frags */
543 ol_rx_frames_free(htt_pdev, frag_list);
544 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
545 "\n ol_rx_defrag: wep decap failed\n");
546 return;
547 }
548 cur = tmp_next;
549 }
550 break;
551
552 default:
553 break;
554 }
555
556 msdu = ol_rx_defrag_decap_recombine(htt_pdev, frag_list, hdr_space);
557 if (!msdu)
558 return;
559
560 if (tkip_demic) {
561 cdf_mem_copy(key,
562 peer->security[index].michael_key,
563 sizeof(peer->security[index].michael_key));
564 if (!ol_rx_frag_tkip_demic(pdev, key, msdu, hdr_space)) {
565 htt_rx_desc_frame_free(htt_pdev, msdu);
566 ol_rx_err(pdev->ctrl_pdev,
567 vdev->vdev_id, peer->mac_addr.raw, tid, 0,
568 OL_RX_DEFRAG_ERR, msdu, NULL, 0);
569 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
570 "\n ol_rx_defrag: TKIP demic failed\n");
571 return;
572 }
573 }
574 wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, msdu);
575 if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh))
576 ol_rx_defrag_qos_decap(pdev, msdu, hdr_space);
577 if (ol_cfg_frame_type(pdev->ctrl_pdev) == wlan_frm_fmt_802_3)
578 ol_rx_defrag_nwifi_to_8023(pdev, msdu);
579
580 ol_rx_fwd_check(vdev, peer, tid, msdu);
581}
582
583/*
584 * Handling TKIP processing for defragmentation
585 */
586int
587ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev,
588 cdf_nbuf_t msdu, uint16_t hdrlen)
589{
590 uint8_t *ivp, *origHdr;
591
592 void *rx_desc_old_position = NULL;
593 void *ind_old_position = NULL;
594 int rx_desc_len = 0;
595
596 ol_rx_frag_desc_adjust(pdev,
597 msdu,
598 &rx_desc_old_position,
599 &ind_old_position, &rx_desc_len);
600 /* Header should have extended IV */
601 origHdr = (uint8_t *) (cdf_nbuf_data(msdu) + rx_desc_len);
602
603 ivp = origHdr + hdrlen;
604 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
605 return OL_RX_DEFRAG_ERR;
606
607 cdf_mem_move(origHdr + f_tkip.ic_header, origHdr, hdrlen);
608 cdf_nbuf_pull_head(msdu, f_tkip.ic_header);
609 cdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
610 return OL_RX_DEFRAG_OK;
611}
612
613/*
614 * Handling WEP processing for defragmentation
615 */
616int
617ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t hdrlen)
618{
619 uint8_t *origHdr;
620 void *rx_desc_old_position = NULL;
621 void *ind_old_position = NULL;
622 int rx_desc_len = 0;
623
624 ol_rx_frag_desc_adjust(pdev,
625 msdu,
626 &rx_desc_old_position,
627 &ind_old_position, &rx_desc_len);
628 origHdr = (uint8_t *) (cdf_nbuf_data(msdu) + rx_desc_len);
629 cdf_mem_move(origHdr + f_wep.ic_header, origHdr, hdrlen);
630 cdf_nbuf_pull_head(msdu, f_wep.ic_header);
631 cdf_nbuf_trim_tail(msdu, f_wep.ic_trailer);
632 return OL_RX_DEFRAG_OK;
633}
634
/**
 * ol_rx_frag_tkip_demic() - verify and strip the TKIP Michael MIC
 * @pdev: txrx physical device
 * @key: 8-byte Michael MIC key
 * @msdu: recombined frame to verify
 * @hdrlen: length of the 802.11 header in the buffer
 *
 * Computes the Michael MIC over the frame body, compares it against the
 * MIC carried at the tail of the frame, and trims the MIC on success.
 *
 * Return: OL_RX_DEFRAG_OK if the MIC matches, OL_RX_DEFRAG_ERR otherwise
 */
int
ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev, const uint8_t *key,
		      cdf_nbuf_t msdu, uint16_t hdrlen)
{
	int status;
	uint32_t pktlen;
	uint8_t mic[IEEE80211_WEP_MICLEN];
	uint8_t mic0[IEEE80211_WEP_MICLEN];
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	pktlen = ol_rx_defrag_len(msdu) - rx_desc_len;

	/* compute the expected MIC over the frame body (excluding the
	 * trailing MIC bytes themselves) */
	status = ol_rx_defrag_mic(pdev, key, msdu, hdrlen,
				  pktlen - (hdrlen + f_tkip.ic_miclen), mic);
	if (status != OL_RX_DEFRAG_OK)
		return OL_RX_DEFRAG_ERR;

	/* extract the received MIC from the tail of the frame */
	ol_rx_defrag_copydata(msdu, pktlen - f_tkip.ic_miclen + rx_desc_len,
			      f_tkip.ic_miclen, (caddr_t) mic0);
	/* cdf_mem_compare has memcmp semantics: nonzero means mismatch */
	if (cdf_mem_compare(mic, mic0, f_tkip.ic_miclen))
		return OL_RX_DEFRAG_ERR;

	cdf_nbuf_trim_tail(msdu, f_tkip.ic_miclen);
	return OL_RX_DEFRAG_OK;
}
670
671/*
672 * Handling CCMP processing for defragmentation
673 */
674int
675ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev,
676 cdf_nbuf_t nbuf, uint16_t hdrlen)
677{
678 uint8_t *ivp, *origHdr;
679 void *rx_desc_old_position = NULL;
680 void *ind_old_position = NULL;
681 int rx_desc_len = 0;
682
683 ol_rx_frag_desc_adjust(pdev,
684 nbuf,
685 &rx_desc_old_position,
686 &ind_old_position, &rx_desc_len);
687
688 origHdr = (uint8_t *) (cdf_nbuf_data(nbuf) + rx_desc_len);
689 ivp = origHdr + hdrlen;
690 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
691 return OL_RX_DEFRAG_ERR;
692
693 cdf_mem_move(origHdr + f_ccmp.ic_header, origHdr, hdrlen);
694 cdf_nbuf_pull_head(nbuf, f_ccmp.ic_header);
695
696 return OL_RX_DEFRAG_OK;
697}
698
699/*
700 * Verify and strip MIC from the frame.
701 */
702int
703ol_rx_frag_ccmp_demic(ol_txrx_pdev_handle pdev,
704 cdf_nbuf_t wbuf, uint16_t hdrlen)
705{
706 uint8_t *ivp, *origHdr;
707 void *rx_desc_old_position = NULL;
708 void *ind_old_position = NULL;
709 int rx_desc_len = 0;
710
711 ol_rx_frag_desc_adjust(pdev,
712 wbuf,
713 &rx_desc_old_position,
714 &ind_old_position, &rx_desc_len);
715
716 origHdr = (uint8_t *) (cdf_nbuf_data(wbuf) + rx_desc_len);
717
718 ivp = origHdr + hdrlen;
719 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
720 return OL_RX_DEFRAG_ERR;
721
722 cdf_nbuf_trim_tail(wbuf, f_ccmp.ic_trailer);
723
724 return OL_RX_DEFRAG_OK;
725}
726
/**
 * ol_rx_defrag_michdr() - build the Michael MIC pseudo header
 * @wh0: the frame's 802.11 header
 * @hdr: out - 16-byte pseudo header: DA(6) | SA(6) | priority(1) | 3 zero bytes
 *
 * DA and SA are selected from the appropriate address fields according to
 * the frame's To-DS/From-DS bits.
 */
void ol_rx_defrag_michdr(const struct ieee80211_frame *wh0, uint8_t hdr[])
{
	const struct ieee80211_frame_addr4 *wh =
		(const struct ieee80211_frame_addr4 *)wh0;

	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_TODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr3);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr4);
		break;
	}
	/*
	 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but
	 * it could also be set for deauth, disassoc, action, etc. for
	 * a mgt type frame. It comes into picture for MFP.
	 */
	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
		const struct ieee80211_qosframe *qwh =
			(const struct ieee80211_qosframe *)wh;
		hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
	} else {
		hdr[12] = 0;
	}
	hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
}
771
/**
 * ol_rx_defrag_mic() - compute the Michael MIC over a (possibly chained) frame
 * @pdev: txrx physical device
 * @key: 8-byte Michael key (two little-endian 32-bit halves)
 * @wbuf: first buffer of the frame; MIC input may span chained buffers
 * @off: byte offset of the MIC'd data within the first buffer's payload
 * @data_len: number of payload bytes to MIC
 * @mic: out - 8-byte computed MIC
 *
 * Implements the Michael keyed hash: the 16-byte pseudo header is mixed
 * in first, then the payload in 32-bit little-endian blocks (handling
 * blocks that straddle buffer boundaries), and finally the 0x5a padding
 * terminator.
 *
 * Return: OL_RX_DEFRAG_OK on success, OL_RX_DEFRAG_ERR if the buffer
 * chain ends before @data_len bytes have been consumed
 */
int
ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
		 const uint8_t *key,
		 cdf_nbuf_t wbuf,
		 uint16_t off, uint16_t data_len, uint8_t mic[])
{
	uint8_t hdr[16] = { 0, };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       wbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	ol_rx_defrag_michdr((struct ieee80211_frame *)(cdf_nbuf_data(wbuf) +
							rx_desc_len), hdr);
	/* key halves seed the running (l, r) Michael state */
	l = get_le32(key);
	r = get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
	l ^= get_le32(hdr);
	michael_block(l, r);
	l ^= get_le32(&hdr[4]);
	michael_block(l, r);
	l ^= get_le32(&hdr[8]);
	michael_block(l, r);
	l ^= get_le32(&hdr[12]);
	michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len + off;
	space = ol_rx_defrag_len(wbuf) - rx_desc_len - off;
	for (;; ) {
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= get_le32(data);
			michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		if (data_len < sizeof(uint32_t))
			break;

		/* advance to the next buffer in the chain */
		wbuf = cdf_nbuf_next(wbuf);
		if (wbuf == NULL)
			return OL_RX_DEFRAG_ERR;

		/* chained buffers have no leading rx descriptor */
		rx_desc_len = 0;

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len;
			if ((ol_rx_defrag_len(wbuf) - rx_desc_len) <
			    sizeof(uint32_t) - space) {
				return OL_RX_DEFRAG_ERR;
			}
			switch (space) {
			case 1:
				l ^= get_le32_split(data[0], data_next[0],
						    data_next[1], data_next[2]);
				data = data_next + 3;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 3;
				break;
			case 2:
				l ^= get_le32_split(data[0], data[1],
						    data_next[0], data_next[1]);
				data = data_next + 2;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 2;
				break;
			case 3:
				l ^= get_le32_split(data[0], data[1], data[2],
						    data_next[0]);
				data = data_next + 1;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 1;
				break;
			}
			michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for next buffer.
			 */
			data = (uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len;
			space = ol_rx_defrag_len(wbuf) - rx_desc_len;
		}
	}
	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	michael_block(l, r);
	michael_block(l, r);
	put_le32(mic, l);
	put_le32(mic + 4, r);

	return OL_RX_DEFRAG_OK;
}
899
900/*
901 * Calculate headersize
902 */
903uint16_t ol_rx_frag_hdrsize(const void *data)
904{
905 const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
906 uint16_t size = sizeof(struct ieee80211_frame);
907
908 if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
909 size += IEEE80211_ADDR_LEN;
910
911 if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
912 size += sizeof(uint16_t);
913 if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
914 size += sizeof(struct ieee80211_htc);
915 }
916 return size;
917}
918
/**
 * ol_rx_defrag_decap_recombine() - merge a fragment chain into one buffer
 * @htt_pdev: HTT physical device handle
 * @frag_list: chain of decapped fragments, first fragment at the head
 * @hdrsize: 802.11 header length to strip from each non-first fragment
 *
 * The first fragment keeps its 802.11 header; each subsequent fragment
 * has its header pulled off and its payload concatenated onto the first
 * buffer.  On success the more-frag bit and fragment number of the
 * combined frame are cleared.
 *
 * Return: the combined buffer, or NULL on concatenation failure (in which
 * case all buffers have been freed)
 */
cdf_nbuf_t
ol_rx_defrag_decap_recombine(htt_pdev_handle htt_pdev,
			     cdf_nbuf_t frag_list, uint16_t hdrsize)
{
	cdf_nbuf_t tmp;
	cdf_nbuf_t msdu = frag_list;
	cdf_nbuf_t rx_nbuf = frag_list;
	struct ieee80211_frame *wh;

	msdu = cdf_nbuf_next(msdu);
	cdf_nbuf_set_next(rx_nbuf, NULL);
	while (msdu) {
		/* the fragment's rx descriptor is no longer needed */
		htt_rx_msdu_desc_free(htt_pdev, msdu);
		tmp = cdf_nbuf_next(msdu);
		cdf_nbuf_set_next(msdu, NULL);
		ol_rx_frag_pull_hdr(htt_pdev, msdu, hdrsize);
		if (!ol_rx_defrag_concat(rx_nbuf, msdu)) {
			/* free the remaining chain, the partial result,
			 * and the fragment that failed to concatenate */
			ol_rx_frames_free(htt_pdev, tmp);
			htt_rx_desc_frame_free(htt_pdev, rx_nbuf);
			cdf_nbuf_free(msdu);
			/* msdu rx desc already freed above */
			return NULL;
		}
		msdu = tmp;
	}
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
							      rx_nbuf);
	/* the combined frame is no longer a fragment */
	wh->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG;
	*(uint16_t *) wh->i_seq &= ~IEEE80211_SEQ_FRAG_MASK;

	return rx_nbuf;
}
954
/**
 * ol_rx_defrag_nwifi_to_8023() - convert an 802.11 header to Ethernet (802.3)
 * @pdev: txrx physical device
 * @msdu: recombined frame to convert in place
 *
 * Saves the 802.11 header and LLC/SNAP header, pulls the buffer head so
 * that exactly one Ethernet header fits in their place, and fills in the
 * destination/source addresses per the frame's To-DS/From-DS bits and
 * the ethertype from the SNAP header.
 * NOTE(review): the DSTODS (4-address) case leaves the Ethernet addresses
 * unfilled -- presumably such frames are not expected on this path;
 * confirm against the callers.
 */
void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, cdf_nbuf_t msdu)
{
	struct ieee80211_frame wh;
	uint32_t hdrsize;
	struct llc_snap_hdr_t llchdr;
	struct ethernet_hdr_t *eth_hdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;
	struct ieee80211_frame *wh_ptr;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	/* copy out the headers before the buffer head is pulled */
	wh_ptr = (struct ieee80211_frame *)(cdf_nbuf_data(msdu) + rx_desc_len);
	cdf_mem_copy(&wh, wh_ptr, sizeof(wh));
	hdrsize = sizeof(struct ieee80211_frame);
	cdf_mem_copy(&llchdr, ((uint8_t *) (cdf_nbuf_data(msdu) +
					    rx_desc_len)) + hdrsize,
		     sizeof(struct llc_snap_hdr_t));

	/*
	 * Now move the data pointer to the beginning of the mac header :
	 * new-header = old-hdr + (wifhdrsize + llchdrsize - ethhdrsize)
	 */
	cdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
				  sizeof(struct llc_snap_hdr_t) -
				  sizeof(struct ethernet_hdr_t)));
	eth_hdr = (struct ethernet_hdr_t *)(cdf_nbuf_data(msdu));
	/* select DA/SA per the frame's direction bits */
	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		cdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			     IEEE80211_ADDR_LEN);
		cdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_TODS:
		cdf_mem_copy(eth_hdr->dest_addr, wh.i_addr3,
			     IEEE80211_ADDR_LEN);
		cdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		cdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			     IEEE80211_ADDR_LEN);
		cdf_mem_copy(eth_hdr->src_addr, wh.i_addr3, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		break;
	}

	cdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
		     sizeof(llchdr.ethertype));
}
1009
/**
 * ol_rx_defrag_qos_decap() - strip the QoS control field from a frame
 * @pdev: txrx physical device
 * @nbuf: frame buffer to decap in place
 * @hdrlen: length of the 802.11 header including the QoS field
 *
 * Slides the 802.11 header forward over the QoS control field (and the
 * HT control field, when present), then clears the QoS subtype bit so
 * the resulting header describes a plain data frame.
 */
void
ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev,
		       cdf_nbuf_t nbuf, uint16_t hdrlen)
{
	struct ieee80211_frame *wh;
	uint16_t qoslen;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       nbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	wh = (struct ieee80211_frame *)(cdf_nbuf_data(nbuf) + rx_desc_len);
	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
		qoslen = sizeof(struct ieee80211_qoscntl);
		/* Qos frame with Order bit set indicates a HTC frame */
		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
			qoslen += sizeof(struct ieee80211_htc);

		/* remove QoS filed from header */
		hdrlen -= qoslen;
		cdf_mem_move((uint8_t *) wh + qoslen, wh, hdrlen);
		wh = (struct ieee80211_frame *)cdf_nbuf_pull_head(nbuf,
								  rx_desc_len +
								  qoslen);
		/* clear QoS bit */
		/*
		 * KW# 6154 'cdf_nbuf_pull_head' in turn calls
		 * __cdf_nbuf_pull_head,
		 * which returns NULL if there is not sufficient data to pull.
		 * It's guaranteed that cdf_nbuf_pull_head will succeed rather
		 * than returning NULL, since the entire rx frame is already
		 * present in the rx buffer.
		 * However, to make it obvious to static analyzers that this
		 * code is safe, add an explicit check that cdf_nbuf_pull_head
		 * returns a non-NULL value.
		 * Since this part of the code is not performance-critical,
		 * adding this explicit check is okay.
		 */
		if (wh)
			wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;

	}
}
1059}