/*
 * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/*-
 * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <ol_htt_api.h>
#include <ol_txrx_api.h>
#include <ol_txrx_htt_api.h>
#include <ol_htt_rx_api.h>
#include <ol_txrx_types.h>
#include <ol_rx_reorder.h>
#include <ol_rx_pn.h>
#include <ol_rx_fwd.h>
#include <ol_rx.h>
#include <ol_txrx_internal.h>
#include <ol_ctrl_txrx_api.h>
#include <ol_txrx_peer_find.h>
#include <cdf_nbuf.h>
#include <ieee80211.h>
#include <cdf_util.h>
#include <athdefs.h>
#include <cdf_memory.h>
#include <ol_rx_defrag.h>
#include <enet.h>
#include <cdf_time.h>		/* cdf_system_time */

#define DEFRAG_IEEE80211_ADDR_EQ(a1, a2) \
	(cdf_mem_compare(a1, a2, IEEE80211_ADDR_LEN) == 0)

#define DEFRAG_IEEE80211_ADDR_COPY(dst, src) \
	cdf_mem_copy(dst, src, IEEE80211_ADDR_LEN)

#define DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \
	(((wh)->i_fc[0] & \
	  (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
	 (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))

#define DEFRAG_IEEE80211_QOS_GET_TID(_x) \
	((_x)->i_qos[0] & IEEE80211_QOS_TID)
const struct ol_rx_defrag_cipher f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

const struct ol_rx_defrag_cipher f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

const struct ol_rx_defrag_cipher f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};

inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
	htt_pdev_handle htt_pdev, cdf_nbuf_t frag)
{
	return (struct ieee80211_frame *)cdf_nbuf_data(frag);
}

#define ol_rx_frag_pull_hdr(pdev, frag, hdrsize) \
	cdf_nbuf_pull_head(frag, hdrsize)
#define OL_RX_FRAG_CLONE(frag) NULL     /* no-op */

static inline void
ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
		       cdf_nbuf_t msdu,
		       void **rx_desc_old_position,
		       void **ind_old_position, int *rx_desc_len)
{
	*rx_desc_old_position = NULL;
	*ind_old_position = NULL;
	*rx_desc_len = 0;
}

/*
 * Process incoming fragments
 */
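/*
 * Each fragment indication from the target carries a single MPDU
 * fragment (note the head_msdu == tail_msdu assert below). For targets
 * without full reorder offload, a flush indication piggybacked on the
 * message first drops any partially reassembled MPDU for this TID
 * before the new fragment is stored.
 */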
void
ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
			      cdf_nbuf_t rx_frag_ind_msg,
			      uint16_t peer_id, uint8_t tid)
{
	uint16_t seq_num;
	int seq_num_start, seq_num_end;
	struct ol_txrx_peer_t *peer;
	htt_pdev_handle htt_pdev;
	cdf_nbuf_t head_msdu, tail_msdu;
	void *rx_mpdu_desc;

	htt_pdev = pdev->htt_pdev;
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev) &&
	    htt_rx_ind_flush(pdev->htt_pdev, rx_frag_ind_msg) && peer) {
		htt_rx_frag_ind_flush_seq_num_range(pdev->htt_pdev,
						    rx_frag_ind_msg,
						    &seq_num_start,
						    &seq_num_end);
		/*
		 * Assuming flush indication for frags sent from target is
		 * separate from normal frames
		 */
		ol_rx_reorder_flush_frag(htt_pdev, peer, tid, seq_num_start);
	}
	if (peer) {
		htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
				&tail_msdu);
		cdf_assert(head_msdu == tail_msdu);
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev, head_msdu);
		} else {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev,
							   rx_frag_ind_msg);
		}
		seq_num = htt_rx_mpdu_desc_seq_num(htt_pdev, rx_mpdu_desc);
		OL_RX_ERR_STATISTICS_1(pdev, peer->vdev, peer, rx_mpdu_desc,
				       OL_RX_ERR_NONE_FRAG);
		ol_rx_reorder_store_frag(pdev, peer, tid, seq_num, head_msdu);
	} else {
		/* invalid frame - discard it */
		htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
				&tail_msdu);
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu);
		else
			htt_rx_mpdu_desc_list_next(htt_pdev, rx_frag_ind_msg);

		htt_rx_desc_frame_free(htt_pdev, head_msdu);
	}
	/* request HTT to provide new rx MSDU buffers for the target to fill. */
	htt_rx_msdu_buff_replenish(htt_pdev);
}

/*
 * Flushing fragments
 */
void
ol_rx_reorder_flush_frag(htt_pdev_handle htt_pdev,
			 struct ol_txrx_peer_t *peer, unsigned tid, int seq_num)
{
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	int seq;

	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];
	if (rx_reorder_array_elem->head) {
		ol_rx_frames_free(htt_pdev, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
	}
}

/*
 * Reorder and store fragments
 */
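/*
 * Fragments are always accumulated at index 0 of the per-TID reorder
 * array: the win_sz_mask of the fragment queue reduces every sequence
 * number to 0 (see the assert below and ol_rx_reorder_flush_frag).
 * An unfragmented frame (fragment number 0, More Frag clear) that finds
 * the slot empty is defragmented, i.e. delivered, immediately. A fragment
 * whose sequence number or addresses disagree with the fragments already
 * queued causes the stale partial list to be discarded first.
 */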
void
ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
			 struct ol_txrx_peer_t *peer,
			 unsigned tid, uint16_t seq_num, cdf_nbuf_t frag)
{
	struct ieee80211_frame *fmac_hdr, *mac_hdr;
	uint8_t fragno, more_frag, all_frag_present = 0;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	uint16_t frxseq, rxseq, seq;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	cdf_assert(seq == 0);
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	rxseq = cdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) >>
		IEEE80211_SEQ_SEQ_SHIFT;
	fragno = cdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	more_frag = mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;

	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		rx_reorder_array_elem->head = frag;
		rx_reorder_array_elem->tail = frag;
		cdf_nbuf_set_next(frag, NULL);
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		return;
	}
	if (rx_reorder_array_elem->head) {
		fmac_hdr = (struct ieee80211_frame *)
			ol_rx_frag_get_mac_hdr(htt_pdev,
					       rx_reorder_array_elem->head);
		frxseq = cdf_le16_to_cpu(*(uint16_t *) fmac_hdr->i_seq) >>
			IEEE80211_SEQ_SEQ_SHIFT;
		if (rxseq != frxseq
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr1,
						 fmac_hdr->i_addr1)
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr2,
						 fmac_hdr->i_addr2)) {
			ol_rx_frames_free(htt_pdev,
					  rx_reorder_array_elem->head);
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				   "\n ol_rx_reorder_store: %s mismatch \n",
				   (rxseq == frxseq)
				   ? "address"
				   : "seq number");
		}
	}

	ol_rx_fraglist_insert(htt_pdev, &rx_reorder_array_elem->head,
			      &rx_reorder_array_elem->tail, frag,
			      &all_frag_present);

	if (pdev->rx.flags.defrag_timeout_check)
		ol_rx_defrag_waitlist_remove(peer, tid);

	if (all_frag_present) {
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		peer->tids_rx_reorder[tid].defrag_timeout_ms = 0;
		peer->tids_last_seq[tid] = seq_num;
	} else if (pdev->rx.flags.defrag_timeout_check) {
		uint32_t now_ms = cdf_system_ticks_to_msecs(cdf_system_ticks());

		peer->tids_rx_reorder[tid].defrag_timeout_ms =
			now_ms + pdev->rx.defrag.timeout_ms;
		ol_rx_defrag_waitlist_add(peer, tid);
	}
}

/*
 * Insert and store fragments
 */
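/*
 * The list is kept sorted by fragment number: a new fragment is appended
 * if it belongs after the current tail (the common, in-order case) and
 * otherwise inserted by a linear scan from the head; duplicates are
 * freed. *all_frag_present is set only when the last queued fragment has
 * the More Frag bit clear and the fragment numbers run 0, 1, 2, ... with
 * no gap; e.g. arrival order 0, 2, 1 yields the list 0-1-2 and reports
 * completion once fragment 1 arrives.
 */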
void
ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
		      cdf_nbuf_t *head_addr,
		      cdf_nbuf_t *tail_addr,
		      cdf_nbuf_t frag, uint8_t *all_frag_present)
{
	cdf_nbuf_t next, prev = NULL, cur = *head_addr;
	struct ieee80211_frame *mac_hdr, *cmac_hdr, *next_hdr, *lmac_hdr;
	uint8_t fragno, cur_fragno, lfragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;
	cdf_nbuf_t frag_clone;

	cdf_assert(frag);
	frag_clone = OL_RX_FRAG_CLONE(frag);
	frag = frag_clone ? frag_clone : frag;

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	fragno = cdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;

	if (!(*head_addr)) {
		*head_addr = frag;
		*tail_addr = frag;
		cdf_nbuf_set_next(*tail_addr, NULL);
		return;
	}
	/* For efficiency, compare with tail first */
	lmac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, *tail_addr);
	lfragno = cdf_le16_to_cpu(*(uint16_t *) lmac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	if (fragno > lfragno) {
		cdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		cdf_nbuf_set_next(*tail_addr, NULL);
	} else {
		do {
			cmac_hdr = (struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, cur);
			cur_fragno =
				cdf_le16_to_cpu(*(uint16_t *) cmac_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			prev = cur;
			cur = cdf_nbuf_next(cur);
		} while (fragno > cur_fragno);

		if (fragno == cur_fragno) {
			/* duplicate fragment - drop it */
			htt_rx_desc_frame_free(htt_pdev, frag);
			*all_frag_present = 0;
			return;
		}
		cdf_nbuf_set_next(prev, frag);
		cdf_nbuf_set_next(frag, cur);
	}
	next = cdf_nbuf_next(*head_addr);
	lmac_hdr = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
								    *tail_addr);
	last_morefrag = lmac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
	if (!last_morefrag) {
		do {
			next_hdr =
				(struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, next);
			next_fragno =
				cdf_le16_to_cpu(*(uint16_t *) next_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			count++;
			if (next_fragno != count)
				break;

			next = cdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return;
		}
	}
	*all_frag_present = 0;
}

/*
 * add tid to pending fragment wait list
 */
void ol_rx_defrag_waitlist_add(struct ol_txrx_peer_t *peer, unsigned tid)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];

	TAILQ_INSERT_TAIL(&pdev->rx.defrag.waitlist, rx_reorder,
			  defrag_waitlist_elem);
}

/*
 * remove tid from pending fragment wait list
 */
void ol_rx_defrag_waitlist_remove(struct ol_txrx_peer_t *peer, unsigned tid)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];

	if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) {

		TAILQ_REMOVE(&pdev->rx.defrag.waitlist, rx_reorder,
			     defrag_waitlist_elem);

		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
		rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
	} else if (rx_reorder->defrag_waitlist_elem.tqe_prev != NULL) {
		/*
		 * Partially linked entry: tqe_next is NULL but tqe_prev
		 * is not - flag the inconsistency.
		 */
		TXRX_PRINT(TXRX_PRINT_LEVEL_FATAL_ERR,
			   "defrag waitlist inconsistent: tqe_next NULL but tqe_prev set\n");
		CDF_ASSERT(0);
		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
	}
}

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
#endif

/*
 * flush stale fragments from the waitlist
 */
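/*
 * Recovering the peer from a waitlist element relies on pointer
 * arithmetic: rx_reorder - tid steps back to &peer->tids_rx_reorder[0],
 * and container_of() then subtracts the member offset, roughly:
 *
 *   peer = (struct ol_txrx_peer_t *)((char *)rx_reorder_base -
 *              offsetof(struct ol_txrx_peer_t, tids_rx_reorder[0]));
 *
 * Entries are appended with a uniform timeout, so the list is ordered by
 * expiry and the walk below can stop at the first unexpired entry.
 */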
void ol_rx_defrag_waitlist_flush(struct ol_txrx_pdev_t *pdev)
{
	struct ol_rx_reorder_t *rx_reorder, *tmp;
	uint32_t now_ms = cdf_system_ticks_to_msecs(cdf_system_ticks());

	TAILQ_FOREACH_SAFE(rx_reorder, &pdev->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		struct ol_txrx_peer_t *peer;
		struct ol_rx_reorder_t *rx_reorder_base;
		unsigned tid;

		if (rx_reorder->defrag_timeout_ms > now_ms)
			break;

		tid = rx_reorder->tid;
		/* get index 0 of the rx_reorder array */
		rx_reorder_base = rx_reorder - tid;
		peer = container_of(rx_reorder_base, struct ol_txrx_peer_t,
				    tids_rx_reorder[0]);

		ol_rx_defrag_waitlist_remove(peer, tid);
		ol_rx_reorder_flush_frag(pdev->htt_pdev, peer, tid,
					 0 /* frags always stored at seq 0 */);
	}
}

/*
 * Handling security checking and processing fragments
 */
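/*
 * Per-fragment PN replay checking and FCS stripping happen first; each
 * fragment is then decapped according to the unicast/multicast security
 * type (TKIP/WEP decap, CCMP MIC strip plus decap), the fragments are
 * recombined into a single MSDU, and for TKIP the Michael MIC is
 * verified over the whole reassembled frame before QoS decap, optional
 * 802.3 conversion, and the forwarding check.
 */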
void
ol_rx_defrag(ol_txrx_pdev_handle pdev,
	     struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t frag_list)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	cdf_nbuf_t tmp_next, msdu, prev = NULL, cur = frag_list;
	uint8_t index, tkip_demic = 0;
	uint16_t hdr_space;
	void *rx_desc;
	struct ieee80211_frame *wh;
	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];

	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	vdev = peer->vdev;

	/* bypass defrag for safe mode */
	if (vdev->safemode) {
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			ol_rx_in_order_deliver(vdev, peer, tid, frag_list);
		else
			ol_rx_deliver(vdev, peer, tid, frag_list);
		return;
	}

	while (cur) {
		tmp_next = cdf_nbuf_next(cur);
		cdf_nbuf_set_next(cur, NULL);
		if (!ol_rx_pn_check_base(vdev, peer, tid, cur)) {
			/* PN check failed, discard frags */
			if (prev) {
				cdf_nbuf_set_next(prev, NULL);
				ol_rx_frames_free(htt_pdev, frag_list);
			}
			ol_rx_frames_free(htt_pdev, tmp_next);
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				   "ol_rx_defrag: PN Check failed\n");
			return;
		}
		/* remove FCS from each fragment */
		cdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
		prev = cur;
		cdf_nbuf_set_next(cur, tmp_next);
		cur = tmp_next;
	}
	cur = frag_list;
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, cur);
	hdr_space = ol_rx_frag_hdrsize(wh);
	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag_list);
	cdf_assert(htt_rx_msdu_has_wlan_mcast_flag(htt_pdev, rx_desc));
	index = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc) ?
		txrx_sec_mcast : txrx_sec_ucast;

	switch (peer->security[index].sec_type) {
	case htt_sec_type_tkip:
		tkip_demic = 1;
		/* fall-through to rest of tkip ops */
	case htt_sec_type_tkip_nomic:
		while (cur) {
			tmp_next = cdf_nbuf_next(cur);
			if (!ol_rx_frag_tkip_decap(pdev, cur, hdr_space)) {
				/* TKIP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
					   "\n ol_rx_defrag: TKIP decap failed\n");
				return;
			}
			cur = tmp_next;
		}
		break;

	case htt_sec_type_aes_ccmp:
		while (cur) {
			tmp_next = cdf_nbuf_next(cur);
			if (!ol_rx_frag_ccmp_demic(pdev, cur, hdr_space)) {
				/* CCMP demic failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
					   "\n ol_rx_defrag: CCMP demic failed\n");
				return;
			}
			if (!ol_rx_frag_ccmp_decap(pdev, cur, hdr_space)) {
				/* CCMP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
					   "\n ol_rx_defrag: CCMP decap failed\n");
				return;
			}
			cur = tmp_next;
		}
		break;

	case htt_sec_type_wep40:
	case htt_sec_type_wep104:
	case htt_sec_type_wep128:
		while (cur) {
			tmp_next = cdf_nbuf_next(cur);
			if (!ol_rx_frag_wep_decap(pdev, cur, hdr_space)) {
				/* WEP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
					   "\n ol_rx_defrag: wep decap failed\n");
				return;
			}
			cur = tmp_next;
		}
		break;

	default:
		break;
	}

	msdu = ol_rx_defrag_decap_recombine(htt_pdev, frag_list, hdr_space);
	if (!msdu)
		return;

	if (tkip_demic) {
		cdf_mem_copy(key,
			     peer->security[index].michael_key,
			     sizeof(peer->security[index].michael_key));
		if (!ol_rx_frag_tkip_demic(pdev, key, msdu, hdr_space)) {
			htt_rx_desc_frame_free(htt_pdev, msdu);
			ol_rx_err(pdev->ctrl_pdev,
				  vdev->vdev_id, peer->mac_addr.raw, tid, 0,
				  OL_RX_DEFRAG_ERR, msdu, NULL, 0);
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				   "\n ol_rx_defrag: TKIP demic failed\n");
			return;
		}
	}
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, msdu);
	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh))
		ol_rx_defrag_qos_decap(pdev, msdu, hdr_space);
	if (ol_cfg_frame_type(pdev->ctrl_pdev) == wlan_frm_fmt_802_3)
		ol_rx_defrag_nwifi_to_8023(pdev, msdu);

	ol_rx_fwd_check(vdev, peer, tid, msdu);
}

/*
 * Handling TKIP processing for defragmentation
 */
int
ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev,
		      cdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;

	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);
	/* Header should have extended IV */
	origHdr = (uint8_t *) (cdf_nbuf_data(msdu) + rx_desc_len);

	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	cdf_mem_move(origHdr + f_tkip.ic_header, origHdr, hdrlen);
	cdf_nbuf_pull_head(msdu, f_tkip.ic_header);
	cdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
	return OL_RX_DEFRAG_OK;
}

/*
 * Handling WEP processing for defragmentation
 */
int
ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);
	origHdr = (uint8_t *) (cdf_nbuf_data(msdu) + rx_desc_len);
	cdf_mem_move(origHdr + f_wep.ic_header, origHdr, hdrlen);
	cdf_nbuf_pull_head(msdu, f_wep.ic_header);
	cdf_nbuf_trim_tail(msdu, f_wep.ic_trailer);
	return OL_RX_DEFRAG_OK;
}

/*
 * Verify and strip MIC from the frame.
 */
int
ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev, const uint8_t *key,
		      cdf_nbuf_t msdu, uint16_t hdrlen)
{
	int status;
	uint32_t pktlen;
	uint8_t mic[IEEE80211_WEP_MICLEN];
	uint8_t mic0[IEEE80211_WEP_MICLEN];
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	pktlen = ol_rx_defrag_len(msdu) - rx_desc_len;

	status = ol_rx_defrag_mic(pdev, key, msdu, hdrlen,
				  pktlen - (hdrlen + f_tkip.ic_miclen), mic);
	if (status != OL_RX_DEFRAG_OK)
		return OL_RX_DEFRAG_ERR;

	ol_rx_defrag_copydata(msdu, pktlen - f_tkip.ic_miclen + rx_desc_len,
			      f_tkip.ic_miclen, (caddr_t) mic0);
	if (cdf_mem_compare(mic, mic0, f_tkip.ic_miclen))
		return OL_RX_DEFRAG_ERR;

	cdf_nbuf_trim_tail(msdu, f_tkip.ic_miclen);
	return OL_RX_DEFRAG_OK;
}

/*
 * Handling CCMP processing for defragmentation
 */
int
ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev,
		      cdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       nbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	origHdr = (uint8_t *) (cdf_nbuf_data(nbuf) + rx_desc_len);
	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	cdf_mem_move(origHdr + f_ccmp.ic_header, origHdr, hdrlen);
	cdf_nbuf_pull_head(nbuf, f_ccmp.ic_header);

	return OL_RX_DEFRAG_OK;
}

/*
 * Verify and strip MIC from the frame.
 */
int
ol_rx_frag_ccmp_demic(ol_txrx_pdev_handle pdev,
		      cdf_nbuf_t wbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       wbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	origHdr = (uint8_t *) (cdf_nbuf_data(wbuf) + rx_desc_len);

	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	cdf_nbuf_trim_tail(wbuf, f_ccmp.ic_trailer);

	return OL_RX_DEFRAG_OK;
}

/*
 * Craft pseudo header used to calculate the MIC.
 */
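/*
 * The 16-byte pseudo header, assembled below from the frame's direction
 * bits, is laid out as: DA (bytes 0-5), SA (bytes 6-11), TID/priority
 * (byte 12), and three zero bytes of padding (bytes 13-15).
 */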
void ol_rx_defrag_michdr(const struct ieee80211_frame *wh0, uint8_t hdr[])
{
	const struct ieee80211_frame_addr4 *wh =
		(const struct ieee80211_frame_addr4 *)wh0;

	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1);   /* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_TODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3);   /* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1);   /* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr3);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3);   /* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr4);
		break;
	}
	/*
	 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for a data frame, but it can
	 * also be set for deauth, disassoc, action, etc. in a management
	 * frame; this matters for management frame protection (MFP).
	 */
	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
		const struct ieee80211_qosframe *qwh =
			(const struct ieee80211_qosframe *)wh;
		hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
	} else {
		hdr[12] = 0;
	}
	hdr[13] = hdr[14] = hdr[15] = 0;        /* reserved */
}

/*
 * Michael_mic for defragmentation
 */
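/*
 * The Michael keystream state is a pair of 32-bit little-endian words
 * taken from the 8-byte key; the pseudo header and payload are mixed in
 * one 32-bit block at a time (handling blocks that straddle nbuf
 * boundaries), and the data is terminated with a single 0x5a byte plus
 * zero padding before the final two block rounds.
 */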
int
ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
		 const uint8_t *key,
		 cdf_nbuf_t wbuf,
		 uint16_t off, uint16_t data_len, uint8_t mic[])
{
	uint8_t hdr[16] = { 0 };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       wbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	ol_rx_defrag_michdr((struct ieee80211_frame *)(cdf_nbuf_data(wbuf) +
						       rx_desc_len), hdr);
	l = get_le32(key);
	r = get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, Priority, 3 x 0 */
	l ^= get_le32(hdr);
	michael_block(l, r);
	l ^= get_le32(&hdr[4]);
	michael_block(l, r);
	l ^= get_le32(&hdr[8]);
	michael_block(l, r);
	l ^= get_le32(&hdr[12]);
	michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len + off;
	space = ol_rx_defrag_len(wbuf) - rx_desc_len - off;
	for (;;) {
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= get_le32(data);
			michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		if (data_len < sizeof(uint32_t))
			break;

		wbuf = cdf_nbuf_next(wbuf);
		if (wbuf == NULL)
			return OL_RX_DEFRAG_ERR;

		rx_desc_len = 0;

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len;
			if ((ol_rx_defrag_len(wbuf) - rx_desc_len) <
			    sizeof(uint32_t) - space) {
				return OL_RX_DEFRAG_ERR;
			}
			switch (space) {
			case 1:
				l ^= get_le32_split(data[0], data_next[0],
						    data_next[1], data_next[2]);
				data = data_next + 3;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 3;
				break;
			case 2:
				l ^= get_le32_split(data[0], data[1],
						    data_next[0], data_next[1]);
				data = data_next + 2;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 2;
				break;
			case 3:
				l ^= get_le32_split(data[0], data[1], data[2],
						    data_next[0]);
				data = data_next + 1;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 1;
				break;
			}
			michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for next buffer.
			 */
			data = (uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len;
			space = ol_rx_defrag_len(wbuf) - rx_desc_len;
		}
	}
	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	michael_block(l, r);
	michael_block(l, r);
	put_le32(mic, l);
	put_le32(mic + 4, r);

	return OL_RX_DEFRAG_OK;
}

/*
 * Calculate the 802.11 header size
 */
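/*
 * Worked example: a 4-address (DS-to-DS) QoS data frame with the Order
 * bit set would be sizeof(struct ieee80211_frame) (24) +
 * IEEE80211_ADDR_LEN (6) + 2 bytes of QoS control +
 * sizeof(struct ieee80211_htc) (4) = 36 bytes, assuming the conventional
 * sizes for those types.
 */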
uint16_t ol_rx_frag_hdrsize(const void *data)
{
	const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
	uint16_t size = sizeof(struct ieee80211_frame);

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
		size += IEEE80211_ADDR_LEN;

	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
		size += sizeof(uint16_t);
		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
			size += sizeof(struct ieee80211_htc);
	}
	return size;
}

/*
 * Recombine and decap fragments
 */
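/*
 * The first fragment keeps its 802.11 header; each subsequent fragment
 * has its header pulled and its payload concatenated onto the head
 * buffer. The More Frag flag and fragment-number bits are then cleared
 * so the result looks like an ordinary unfragmented frame.
 */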
cdf_nbuf_t
ol_rx_defrag_decap_recombine(htt_pdev_handle htt_pdev,
			     cdf_nbuf_t frag_list, uint16_t hdrsize)
{
	cdf_nbuf_t tmp;
	cdf_nbuf_t msdu = frag_list;
	cdf_nbuf_t rx_nbuf = frag_list;
	struct ieee80211_frame *wh;

	msdu = cdf_nbuf_next(msdu);
	cdf_nbuf_set_next(rx_nbuf, NULL);
	while (msdu) {
		htt_rx_msdu_desc_free(htt_pdev, msdu);
		tmp = cdf_nbuf_next(msdu);
		cdf_nbuf_set_next(msdu, NULL);
		ol_rx_frag_pull_hdr(htt_pdev, msdu, hdrsize);
		if (!ol_rx_defrag_concat(rx_nbuf, msdu)) {
			ol_rx_frames_free(htt_pdev, tmp);
			htt_rx_desc_frame_free(htt_pdev, rx_nbuf);
			cdf_nbuf_free(msdu);
			/* msdu rx desc already freed above */
			return NULL;
		}
		msdu = tmp;
	}
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
							      rx_nbuf);
	wh->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG;
	*(uint16_t *) wh->i_seq &= ~IEEE80211_SEQ_FRAG_MASK;

	return rx_nbuf;
}

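/*
 * Convert the reassembled native-wifi (802.11 + LLC/SNAP) frame into an
 * 802.3 Ethernet frame in place: the destination/source MACs are chosen
 * from addr1/addr2/addr3 according to the To-DS/From-DS direction bits,
 * and the LLC/SNAP ethertype becomes the Ethernet ethertype.
 */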
void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, cdf_nbuf_t msdu)
{
	struct ieee80211_frame wh;
	uint32_t hdrsize;
	struct llc_snap_hdr_t llchdr;
	struct ethernet_hdr_t *eth_hdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;
	struct ieee80211_frame *wh_ptr;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	wh_ptr = (struct ieee80211_frame *)(cdf_nbuf_data(msdu) + rx_desc_len);
	cdf_mem_copy(&wh, wh_ptr, sizeof(wh));
	hdrsize = sizeof(struct ieee80211_frame);
	cdf_mem_copy(&llchdr, ((uint8_t *) (cdf_nbuf_data(msdu) +
					    rx_desc_len)) + hdrsize,
		     sizeof(struct llc_snap_hdr_t));

	/*
	 * Now move the data pointer to the beginning of the MAC header:
	 * new-header = old-hdr + (wifhdrsize + llchdrsize - ethhdrsize)
	 */
	cdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
				  sizeof(struct llc_snap_hdr_t) -
				  sizeof(struct ethernet_hdr_t)));
	eth_hdr = (struct ethernet_hdr_t *)(cdf_nbuf_data(msdu));
	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		cdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			     IEEE80211_ADDR_LEN);
		cdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_TODS:
		cdf_mem_copy(eth_hdr->dest_addr, wh.i_addr3,
			     IEEE80211_ADDR_LEN);
		cdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		cdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			     IEEE80211_ADDR_LEN);
		cdf_mem_copy(eth_hdr->src_addr, wh.i_addr3, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		break;
	}

	cdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
		     sizeof(llchdr.ethertype));
}

/*
 * Handling QOS for defragmentation
 */
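/*
 * Strips the 2-byte QoS control field (plus the 4-byte HT control field
 * when the Order bit is set) by sliding the preceding header bytes
 * forward and pulling the buffer head, then clears the QoS subtype bit
 * in the frame control field.
 */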
void
ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev,
		       cdf_nbuf_t nbuf, uint16_t hdrlen)
{
	struct ieee80211_frame *wh;
	uint16_t qoslen;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       nbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	wh = (struct ieee80211_frame *)(cdf_nbuf_data(nbuf) + rx_desc_len);
	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
		qoslen = sizeof(struct ieee80211_qoscntl);
		/* QoS frame with Order bit set indicates an HTC frame */
		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
			qoslen += sizeof(struct ieee80211_htc);

		/* remove the QoS field from the header */
		hdrlen -= qoslen;
		cdf_mem_move((uint8_t *) wh + qoslen, wh, hdrlen);
		wh = (struct ieee80211_frame *)cdf_nbuf_pull_head(nbuf,
								  rx_desc_len +
								  qoslen);
		/*
		 * KW# 6154: 'cdf_nbuf_pull_head' in turn calls
		 * __cdf_nbuf_pull_head, which returns NULL if there is not
		 * sufficient data to pull. It's guaranteed that
		 * cdf_nbuf_pull_head will succeed rather than returning NULL,
		 * since the entire rx frame is already present in the rx
		 * buffer. However, to make it obvious to static analyzers
		 * that this code is safe, add an explicit check that
		 * cdf_nbuf_pull_head returns a non-NULL value. Since this
		 * part of the code is not performance-critical, adding this
		 * explicit check is okay.
		 */
		/* clear the QoS bit */
		if (wh)
			wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;

	}
}
1063}