/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <ol_htt_api.h>
#include <ol_txrx_api.h>
#include <ol_txrx_htt_api.h>
#include <ol_htt_rx_api.h>
#include <ol_rx_reorder.h>
#include <ol_rx_pn.h>
#include <ol_rx_fwd.h>
#include <ol_rx.h>
#include <ol_txrx_internal.h>
#include <ol_ctrl_txrx_api.h>
#include <ol_txrx_peer_find.h>
#include <qdf_nbuf.h>
#include <qdf_util.h>
#include <athdefs.h>
#include <qdf_mem.h>
#include <ol_rx_defrag.h>
#include <enet.h>
#include <qdf_time.h>		/* qdf_system_time */

#define DEFRAG_IEEE80211_ADDR_EQ(a1, a2) \
	(!qdf_mem_cmp(a1, a2, IEEE80211_ADDR_LEN))

#define DEFRAG_IEEE80211_ADDR_COPY(dst, src) \
	qdf_mem_copy(dst, src, IEEE80211_ADDR_LEN)

#define DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \
	(((wh)->i_fc[0] & \
	  (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
	 (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))

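/*
 * Illustration of the check above (a sketch, assuming the usual
 * FreeBSD-derived constants IEEE80211_FC0_TYPE_MASK = 0x0c,
 * IEEE80211_FC0_TYPE_DATA = 0x08 and IEEE80211_FC0_SUBTYPE_QOS = 0x80):
 * a QoS-data frame has i_fc[0] = 0x88, so
 * (0x88 & (0x0c | 0x80)) == (0x08 | 0x80) and the macro is true, while
 * a plain data frame (i_fc[0] = 0x08) lacks the QoS subtype bit and the
 * macro is false.
 */
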
#define DEFRAG_IEEE80211_QOS_GET_TID(_x) \
	((_x)->i_qos[0] & IEEE80211_QOS_TID)

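/*
 * Per-cipher defragmentation parameters. Judging from how these tables
 * are used below, the fields are assumed to be: cipher name, length of
 * the crypto header (IV/key-ID/extended-IV) stripped from the front of
 * each fragment (ic_header), length of the trailer stripped from the
 * tail (ic_trailer), and the MIC length (ic_miclen).
 */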
const struct ol_rx_defrag_cipher f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

const struct ol_rx_defrag_cipher f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

const struct ol_rx_defrag_cipher f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};

#if defined(CONFIG_HL_SUPPORT)

/**
 * ol_rx_frag_get_mac_hdr() - retrieve mac header
 * @htt_pdev: pointer to htt pdev handle
 * @frag: rx fragment
 *
 * Return: pointer to ieee mac header of frag
 */
static struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
	htt_pdev_handle htt_pdev, qdf_nbuf_t frag)
{
	void *rx_desc;
	int rx_desc_len;

	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc);
	return (struct ieee80211_frame *)(qdf_nbuf_data(frag) + rx_desc_len);
}

/**
 * ol_rx_frag_pull_hdr() - point to payload of rx frag
 * @htt_pdev: pointer to htt pdev handle
 * @frag: rx fragment
 * @hdrsize: header size
 *
 * Return: None
 */
static void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev,
	qdf_nbuf_t frag, int hdrsize)
{
	void *rx_desc;
	int rx_desc_len;

	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc);
	qdf_nbuf_pull_head(frag, rx_desc_len + hdrsize);
}

/**
 * ol_rx_frag_desc_adjust() - adjust rx frag descriptor position
 * @pdev: pointer to txrx handle
 * @msdu: msdu
 * @rx_desc_old_position: rx descriptor old position
 * @ind_old_position: rx indication old position
 * @rx_desc_len: rx descriptor length
 *
 * Return: None
 */
static void
ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
		       qdf_nbuf_t msdu,
		       void **rx_desc_old_position,
		       void **ind_old_position, int *rx_desc_len)
{
	*rx_desc_old_position = htt_rx_msdu_desc_retrieve(pdev->htt_pdev,
							  msdu);
	*ind_old_position = *rx_desc_old_position - HTT_RX_IND_HL_BYTES;
	*rx_desc_len = htt_rx_msdu_rx_desc_size_hl(pdev->htt_pdev,
						   *rx_desc_old_position);
}

/**
 * ol_rx_frag_restructure() - move the rx descriptor and indication for HL
 * @pdev: physical device object
 * @msdu: the buffer containing the MSDU payload
 * @rx_desc_old_position: rx MSDU descriptor
 * @ind_old_position: rx msdu indication
 * @f_type: pointer to the rx defrag cipher
 * @rx_desc_len: length by which the rx descriptor is to move
 *
 * Return: None
 */
static void
ol_rx_frag_restructure(
	ol_txrx_pdev_handle pdev,
	qdf_nbuf_t msdu,
	void *rx_desc_old_position,
	void *ind_old_position,
	const struct ol_rx_defrag_cipher *f_type,
	int rx_desc_len)
{
	if ((ind_old_position == NULL) || (rx_desc_old_position == NULL)) {
		ol_txrx_err("ind_old_position or rx_desc_old_position is NULL\n");
		ASSERT(0);
		return;
	}
	/* move rx descriptor */
	qdf_mem_move(rx_desc_old_position + f_type->ic_header,
		     rx_desc_old_position, rx_desc_len);
	/* move rx indication */
	qdf_mem_move(ind_old_position + f_type->ic_header, ind_old_position,
		     HTT_RX_IND_HL_BYTES);
}

/**
 * ol_rx_get_desc_len() - get the HL rx descriptor size
 * @htt_pdev: the HTT instance the rx data was received on
 * @wbuf: buffer containing the MSDU payload
 * @rx_desc_old_position: rx MSDU descriptor
 *
 * Return: the HL rx descriptor size
 */
static
int ol_rx_get_desc_len(htt_pdev_handle htt_pdev,
			qdf_nbuf_t wbuf,
			void **rx_desc_old_position)
{
	int rx_desc_len = 0;

	*rx_desc_old_position = htt_rx_msdu_desc_retrieve(htt_pdev, wbuf);
	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev,
						  *rx_desc_old_position);

	return rx_desc_len;
}

/**
 * ol_rx_defrag_push_rx_desc() - push the rx descriptor and indication
 *	back to the front of the nbuf
 * @nbuf: buffer containing the MSDU payload
 * @rx_desc_old_position: rx MSDU descriptor
 * @ind_old_position: rx msdu indication
 * @rx_desc_len: HL rx desc size
 *
 * Return: None
 */
static
void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf,
				void *rx_desc_old_position,
				void *ind_old_position,
				int rx_desc_len)
{
	qdf_nbuf_push_head(nbuf, rx_desc_len);
	qdf_mem_move(
		qdf_nbuf_data(nbuf), rx_desc_old_position, rx_desc_len);
	qdf_mem_move(
		qdf_nbuf_data(nbuf) - HTT_RX_IND_HL_BYTES, ind_old_position,
		HTT_RX_IND_HL_BYTES);
}
#else

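/*
 * LL (low-latency) counterparts. In this path ol_rx_frag_get_mac_hdr()
 * returns qdf_nbuf_data() directly, i.e. the 802.11 header sits at the
 * start of the network buffer with no inline HTT rx descriptor ahead of
 * it, so the descriptor adjust/restructure/push helpers above reduce to
 * no-ops reporting a zero descriptor length.
 */
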
static inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
	htt_pdev_handle htt_pdev,
	qdf_nbuf_t frag)
{
	return (struct ieee80211_frame *)qdf_nbuf_data(frag);
}

static inline void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev,
	qdf_nbuf_t frag, int hdrsize)
{
	qdf_nbuf_pull_head(frag, hdrsize);
}

static inline void
ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
		       qdf_nbuf_t msdu,
		       void **rx_desc_old_position,
		       void **ind_old_position, int *rx_desc_len)
{
	*rx_desc_old_position = NULL;
	*ind_old_position = NULL;
	*rx_desc_len = 0;
}

static inline void
ol_rx_frag_restructure(
	ol_txrx_pdev_handle pdev,
	qdf_nbuf_t msdu,
	void *rx_desc_old_position,
	void *ind_old_position,
	const struct ol_rx_defrag_cipher *f_type,
	int rx_desc_len)
{
	/* no-op */
}

static inline
int ol_rx_get_desc_len(htt_pdev_handle htt_pdev,
			qdf_nbuf_t wbuf,
			void **rx_desc_old_position)
{
	return 0;
}

static inline
void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf,
				void *rx_desc_old_position,
				void *ind_old_position,
				int rx_desc_len)
{
}
#endif /* CONFIG_HL_SUPPORT */

/*
 * Process incoming fragments
 */
void
ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
			      qdf_nbuf_t rx_frag_ind_msg,
			      uint16_t peer_id, uint8_t tid)
{
	uint16_t seq_num;
	uint16_t seq_num_start, seq_num_end;
	struct ol_txrx_peer_t *peer;
	htt_pdev_handle htt_pdev;
	qdf_nbuf_t head_msdu, tail_msdu;
	void *rx_mpdu_desc;
	uint8_t pktlog_bit;
	uint32_t msdu_count = 0;
	int ret;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
		return;
	}

	htt_pdev = pdev->htt_pdev;
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev) &&
	    htt_rx_ind_flush(pdev->htt_pdev, rx_frag_ind_msg) && peer) {
		htt_rx_frag_ind_flush_seq_num_range(pdev->htt_pdev,
						    rx_frag_ind_msg,
						    &seq_num_start,
						    &seq_num_end);
		/*
		 * Assuming the flush indication for fragments sent from
		 * the target is separate from that for normal frames
		 */
		ol_rx_reorder_flush_frag(htt_pdev, peer, tid, seq_num_start);
	}
	pktlog_bit =
		(htt_rx_amsdu_rx_in_order_get_pktlog(rx_frag_ind_msg) == 0x01);
	ret = htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
			      &tail_msdu, &msdu_count);
	/* Return if the msdu pop from the rx hash table fails; recovery
	 * has been triggered, so exit gracefully.
	 */
	if (!ret)
		return;
	if (peer) {
		qdf_assert(head_msdu == tail_msdu);
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev, head_msdu);
		} else {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev,
							   rx_frag_ind_msg);
		}
		seq_num = htt_rx_mpdu_desc_seq_num(htt_pdev, rx_mpdu_desc);
		OL_RX_ERR_STATISTICS_1(pdev, peer->vdev, peer, rx_mpdu_desc,
				       OL_RX_ERR_NONE_FRAG);
		ol_rx_send_pktlog_event(pdev, peer, head_msdu, pktlog_bit);
		ol_rx_reorder_store_frag(pdev, peer, tid, seq_num, head_msdu);
	} else {
		/* invalid frame - discard it */
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu);
		else
			htt_rx_mpdu_desc_list_next(htt_pdev, rx_frag_ind_msg);

		ol_rx_send_pktlog_event(pdev, peer, head_msdu, pktlog_bit);
		htt_rx_desc_frame_free(htt_pdev, head_msdu);
	}
	/* request HTT to provide new rx MSDU buffers for the target to fill. */
	htt_rx_msdu_buff_replenish(htt_pdev);
}

/*
 * Flushing fragments
 */
void
ol_rx_reorder_flush_frag(htt_pdev_handle htt_pdev,
			 struct ol_txrx_peer_t *peer,
			 unsigned int tid, uint16_t seq_num)
{
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	int seq;

	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];
	if (rx_reorder_array_elem->head) {
		ol_rx_frames_free(htt_pdev, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
	}
}

/*
 * Reorder and store fragments
 */
void
ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
			 struct ol_txrx_peer_t *peer,
			 unsigned int tid, uint16_t seq_num, qdf_nbuf_t frag)
{
	struct ieee80211_frame *fmac_hdr, *mac_hdr;
	uint8_t fragno, more_frag, all_frag_present = 0;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	uint16_t frxseq, rxseq, seq;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	qdf_assert(seq == 0);
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	rxseq = qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) >>
		IEEE80211_SEQ_SEQ_SHIFT;
	fragno = qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	more_frag = mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;

	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		/* not fragmented at all - defragment immediately */
		rx_reorder_array_elem->head = frag;
		rx_reorder_array_elem->tail = frag;
		qdf_nbuf_set_next(frag, NULL);
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		return;
	}
	if (rx_reorder_array_elem->head) {
		fmac_hdr = (struct ieee80211_frame *)
			ol_rx_frag_get_mac_hdr(htt_pdev,
					       rx_reorder_array_elem->head);
		frxseq = qdf_le16_to_cpu(*(uint16_t *) fmac_hdr->i_seq) >>
			IEEE80211_SEQ_SEQ_SHIFT;
		if (rxseq != frxseq
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr1,
						 fmac_hdr->i_addr1)
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr2,
						 fmac_hdr->i_addr2)) {
			/*
			 * The new fragment does not belong to the MPDU
			 * currently being collected - drop the old fragments.
			 */
			ol_rx_frames_free(htt_pdev,
					  rx_reorder_array_elem->head);
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
			ol_txrx_err("ol_rx_reorder_store: %s mismatch\n",
				    (rxseq == frxseq)
				    ? "address"
				    : "seq number");
		}
	}

	ol_rx_fraglist_insert(htt_pdev, &rx_reorder_array_elem->head,
			      &rx_reorder_array_elem->tail, frag,
			      &all_frag_present);

	if (pdev->rx.flags.defrag_timeout_check)
		ol_rx_defrag_waitlist_remove(peer, tid);

	if (all_frag_present) {
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		peer->tids_rx_reorder[tid].defrag_timeout_ms = 0;
		peer->tids_last_seq[tid] = seq_num;
	} else if (pdev->rx.flags.defrag_timeout_check) {
		uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

		peer->tids_rx_reorder[tid].defrag_timeout_ms =
			now_ms + pdev->rx.defrag.timeout_ms;
		ol_rx_defrag_waitlist_add(peer, tid);
	}
}

/*
 * Insert and store fragments
 */
void
ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
		      qdf_nbuf_t *head_addr,
		      qdf_nbuf_t *tail_addr,
		      qdf_nbuf_t frag, uint8_t *all_frag_present)
{
	qdf_nbuf_t next, prev = NULL, cur = *head_addr;
	struct ieee80211_frame *mac_hdr, *cmac_hdr, *next_hdr, *lmac_hdr;
	uint8_t fragno, cur_fragno, lfragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;

	qdf_assert(frag);

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	fragno = qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;

	if (!(*head_addr)) {
		*head_addr = frag;
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		return;
	}
	/* For efficiency, compare with tail first */
	lmac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, *tail_addr);
	lfragno = qdf_le16_to_cpu(*(uint16_t *) lmac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	if (fragno > lfragno) {
		qdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
	} else {
		/* find the insertion point in the ascending fragment list */
		do {
			cmac_hdr = (struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, cur);
			cur_fragno =
				qdf_le16_to_cpu(*(uint16_t *) cmac_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			prev = cur;
			cur = qdf_nbuf_next(cur);
		} while (fragno > cur_fragno);

		if (fragno == cur_fragno) {
			/* duplicate fragment - discard it */
			htt_rx_desc_frame_free(htt_pdev, frag);
			*all_frag_present = 0;
			return;
		}

		qdf_nbuf_set_next(prev, frag);
		qdf_nbuf_set_next(frag, cur);
	}
	next = qdf_nbuf_next(*head_addr);
	lmac_hdr = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
								    *tail_addr);
	last_morefrag = lmac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
	if (!last_morefrag) {
		/* check that the fragment numbers run consecutively */
		do {
			next_hdr =
				(struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, next);
			next_fragno =
				qdf_le16_to_cpu(*(uint16_t *) next_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			count++;
			if (next_fragno != count)
				break;

			next = qdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return;
		}
	}
	*all_frag_present = 0;
}

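/*
 * Worked example of the insertion logic above (a sketch): fragments of
 * one MPDU carry fragment numbers 0, 1, 2, ... and only the last one
 * has the MORE_FRAG bit clear. If frags 0 and 2 are already queued and
 * frag 1 arrives, the tail compare fails (1 < 2), the walk from the
 * head stops at frag 2, and frag 1 is linked between 0 and 2. Only when
 * the tail has MORE_FRAG clear and the fragment numbers run 0..N with
 * no gap does *all_frag_present get set.
 */
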
/*
 * add tid to pending fragment wait list
 */
void ol_rx_defrag_waitlist_add(struct ol_txrx_peer_t *peer, unsigned int tid)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];

	TAILQ_INSERT_TAIL(&pdev->rx.defrag.waitlist, rx_reorder,
			  defrag_waitlist_elem);
}

/*
 * remove tid from pending fragment wait list
 */
void ol_rx_defrag_waitlist_remove(struct ol_txrx_peer_t *peer, unsigned int tid)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];

	/*
	 * A linked TAILQ entry always has a non-NULL tqe_prev (it points
	 * into the previous element or the list head), whereas tqe_next
	 * is NULL for the last entry, so tqe_prev is the reliable
	 * "is on the waitlist" test.
	 */
	if (rx_reorder->defrag_waitlist_elem.tqe_prev != NULL) {
		TAILQ_REMOVE(&pdev->rx.defrag.waitlist, rx_reorder,
			     defrag_waitlist_elem);

		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
		rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
	} else if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) {
		/* tqe_next set while tqe_prev is NULL: inconsistent entry */
		ol_txrx_alert("waitlist->tqe_prev = NULL\n");
		QDF_ASSERT(0);
		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
	}
}

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
#endif

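/*
 * container_of() recovers the address of an enclosing structure from a
 * pointer to one of its members. In ol_rx_defrag_waitlist_flush()
 * below, the waitlist links individual struct ol_rx_reorder_t entries,
 * so the code first steps back to element 0 of the peer's
 * tids_rx_reorder[] array and then converts that to the owning peer:
 *
 *	rx_reorder_base = rx_reorder - tid;
 *	peer = container_of(rx_reorder_base, struct ol_txrx_peer_t,
 *			    tids_rx_reorder[0]);
 */
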
/*
 * flush stale fragments from the waitlist
 */
void ol_rx_defrag_waitlist_flush(struct ol_txrx_pdev_t *pdev)
{
	struct ol_rx_reorder_t *rx_reorder, *tmp;
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

	TAILQ_FOREACH_SAFE(rx_reorder, &pdev->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		struct ol_txrx_peer_t *peer;
		struct ol_rx_reorder_t *rx_reorder_base;
		unsigned int tid;

		if (rx_reorder->defrag_timeout_ms > now_ms)
			break;

		tid = rx_reorder->tid;
		if (tid >= OL_TXRX_NUM_EXT_TIDS) {
			ol_txrx_err("%s: invalid tid, %u\n", __func__, tid);
			WARN_ON(1);
			continue;
		}
		/* get index 0 of the rx_reorder array */
		rx_reorder_base = rx_reorder - tid;
		peer = container_of(rx_reorder_base, struct ol_txrx_peer_t,
				    tids_rx_reorder[0]);

		ol_rx_defrag_waitlist_remove(peer, tid);
		ol_rx_reorder_flush_frag(pdev->htt_pdev, peer, tid,
					 0 /* frags always stored at seq 0 */);
	}
}

/*
 * Handle security checking and processing of fragments
 */
void
ol_rx_defrag(ol_txrx_pdev_handle pdev,
	     struct ol_txrx_peer_t *peer, unsigned int tid,
	     qdf_nbuf_t frag_list)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	qdf_nbuf_t tmp_next, msdu, prev = NULL, cur = frag_list;
	uint8_t index, tkip_demic = 0;
	uint16_t hdr_space;
	void *rx_desc;
	struct ieee80211_frame *wh;
	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	vdev = peer->vdev;

	/* bypass defrag for safe mode */
	if (vdev->safemode) {
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			ol_rx_in_order_deliver(vdev, peer, tid, frag_list);
		else
			ol_rx_deliver(vdev, peer, tid, frag_list);
		return;
	}

	while (cur) {
		tmp_next = qdf_nbuf_next(cur);
		qdf_nbuf_set_next(cur, NULL);
		if (!ol_rx_pn_check_base(vdev, peer, tid, cur)) {
			/* PN check failed, discard frags */
			if (prev) {
				qdf_nbuf_set_next(prev, NULL);
				ol_rx_frames_free(htt_pdev, frag_list);
			}
			ol_rx_frames_free(htt_pdev, tmp_next);
			ol_txrx_err("ol_rx_defrag: PN check failed\n");
			return;
		}
		/* remove FCS from each fragment */
		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
		prev = cur;
		qdf_nbuf_set_next(cur, tmp_next);
		cur = tmp_next;
	}
	cur = frag_list;
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, cur);
	hdr_space = ol_rx_frag_hdrsize(wh);
	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag_list);
	qdf_assert(htt_rx_msdu_has_wlan_mcast_flag(htt_pdev, rx_desc));
	index = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc) ?
		txrx_sec_mcast : txrx_sec_ucast;

	switch (peer->security[index].sec_type) {
	case htt_sec_type_tkip:
		tkip_demic = 1;
		/* fall-through to rest of tkip ops */
	case htt_sec_type_tkip_nomic:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (!ol_rx_frag_tkip_decap(pdev, cur, hdr_space)) {
				/* TKIP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("ol_rx_defrag: TKIP decap failed\n");
				return;
			}
			cur = tmp_next;
		}
		break;

	case htt_sec_type_aes_ccmp:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (!ol_rx_frag_ccmp_demic(pdev, cur, hdr_space)) {
				/* CCMP demic failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("ol_rx_defrag: CCMP demic failed\n");
				return;
			}
			if (!ol_rx_frag_ccmp_decap(pdev, cur, hdr_space)) {
				/* CCMP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("ol_rx_defrag: CCMP decap failed\n");
				return;
			}
			cur = tmp_next;
		}
		break;

	case htt_sec_type_wep40:
	case htt_sec_type_wep104:
	case htt_sec_type_wep128:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (!ol_rx_frag_wep_decap(pdev, cur, hdr_space)) {
				/* WEP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("ol_rx_defrag: WEP decap failed\n");
				return;
			}
			cur = tmp_next;
		}
		break;

	default:
		break;
	}

	msdu = ol_rx_defrag_decap_recombine(htt_pdev, frag_list, hdr_space);
	if (!msdu)
		return;

	if (tkip_demic) {
		qdf_mem_copy(key,
			     peer->security[index].michael_key,
			     sizeof(peer->security[index].michael_key));
		if (!ol_rx_frag_tkip_demic(pdev, key, msdu, hdr_space)) {
			htt_rx_desc_frame_free(htt_pdev, msdu);
			ol_rx_err(pdev->ctrl_pdev,
				  vdev->vdev_id, peer->mac_addr.raw, tid, 0,
				  OL_RX_DEFRAG_ERR, msdu, NULL, 0);
			ol_txrx_err("ol_rx_defrag: TKIP demic failed\n");
			return;
		}
	}
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, msdu);
	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh))
		ol_rx_defrag_qos_decap(pdev, msdu, hdr_space);
	if (ol_cfg_frame_type(pdev->ctrl_pdev) == wlan_frm_fmt_802_3)
		ol_rx_defrag_nwifi_to_8023(pdev, msdu);

	ol_rx_fwd_check(vdev, peer, tid, msdu);
}

/*
 * Handling TKIP processing for defragmentation
 */
int
ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev,
		      qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);
	/* Header should have extended IV */
	origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);

	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	qdf_mem_move(origHdr + f_tkip.ic_header, origHdr, hdrlen);
	ol_rx_frag_restructure(
		pdev,
		msdu,
		rx_desc_old_position,
		ind_old_position,
		&f_tkip,
		rx_desc_len);
	qdf_nbuf_pull_head(msdu, f_tkip.ic_header);
	qdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
	return OL_RX_DEFRAG_OK;
}

/*
 * Handling WEP processing for defragmentation
 */
int
ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);
	origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
	qdf_mem_move(origHdr + f_wep.ic_header, origHdr, hdrlen);
	ol_rx_frag_restructure(
		pdev,
		msdu,
		rx_desc_old_position,
		ind_old_position,
		&f_wep,
		rx_desc_len);
	qdf_nbuf_pull_head(msdu, f_wep.ic_header);
	qdf_nbuf_trim_tail(msdu, f_wep.ic_trailer);
	return OL_RX_DEFRAG_OK;
}

/*
 * Verify and strip MIC from the frame.
 */
int
ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev, const uint8_t *key,
		      qdf_nbuf_t msdu, uint16_t hdrlen)
{
	int status;
	uint32_t pktlen;
	uint8_t mic[IEEE80211_WEP_MICLEN];
	uint8_t mic0[IEEE80211_WEP_MICLEN];
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	pktlen = ol_rx_defrag_len(msdu) - rx_desc_len;

	status = ol_rx_defrag_mic(pdev, key, msdu, hdrlen,
				  pktlen - (hdrlen + f_tkip.ic_miclen), mic);
	if (status != OL_RX_DEFRAG_OK)
		return OL_RX_DEFRAG_ERR;

	ol_rx_defrag_copydata(msdu, pktlen - f_tkip.ic_miclen + rx_desc_len,
			      f_tkip.ic_miclen, (caddr_t) mic0);
	/*
	 * qdf_mem_cmp() follows memcmp() semantics (cf.
	 * DEFRAG_IEEE80211_ADDR_EQ above): a non-zero result means the
	 * computed MIC does not match the MIC carried in the frame.
	 */
	if (qdf_mem_cmp(mic, mic0, f_tkip.ic_miclen))
		return OL_RX_DEFRAG_ERR;

	qdf_nbuf_trim_tail(msdu, f_tkip.ic_miclen);
	return OL_RX_DEFRAG_OK;
}

/*
 * Handling CCMP processing for defragmentation
 */
int
ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev,
		      qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       nbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len);
	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	qdf_mem_move(origHdr + f_ccmp.ic_header, origHdr, hdrlen);
	ol_rx_frag_restructure(
		pdev,
		nbuf,
		rx_desc_old_position,
		ind_old_position,
		&f_ccmp,
		rx_desc_len);
	qdf_nbuf_pull_head(nbuf, f_ccmp.ic_header);

	return OL_RX_DEFRAG_OK;
}

/*
 * Verify and strip MIC from the frame.
 */
int
ol_rx_frag_ccmp_demic(ol_txrx_pdev_handle pdev,
		      qdf_nbuf_t wbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       wbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	origHdr = (uint8_t *) (qdf_nbuf_data(wbuf) + rx_desc_len);

	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	qdf_nbuf_trim_tail(wbuf, f_ccmp.ic_trailer);

	return OL_RX_DEFRAG_OK;
}

/*
 * Craft pseudo header used to calculate the MIC.
 */
void ol_rx_defrag_michdr(const struct ieee80211_frame *wh0, uint8_t hdr[])
{
	const struct ieee80211_frame_addr4 *wh =
		(const struct ieee80211_frame_addr4 *)wh0;

	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1);	/* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_TODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3);	/* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1);	/* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr3);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3);	/* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr4);
		break;
	}
	/*
	 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for a data frame, but
	 * it could also be set for deauth, disassoc, action, etc. for
	 * a mgt type frame. It comes into the picture for MFP.
	 */
	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
		const struct ieee80211_qosframe *qwh =
			(const struct ieee80211_qosframe *)wh;
		hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
	} else {
		hdr[12] = 0;
	}
	hdr[13] = hdr[14] = hdr[15] = 0;	/* reserved */
}

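/*
 * Resulting 16-byte Michael pseudo header (a sketch of the layout
 * built above):
 *
 *	hdr[0..5]   destination address (DA)
 *	hdr[6..11]  source address (SA)
 *	hdr[12]     QoS TID (0 for non-QoS frames)
 *	hdr[13..15] reserved, always 0
 */
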
/*
 * Michael_mic for defragmentation
 */
int
ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
		 const uint8_t *key,
		 qdf_nbuf_t wbuf,
		 uint16_t off, uint16_t data_len, uint8_t mic[])
{
	uint8_t hdr[16] = { 0, };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	ol_rx_frag_desc_adjust(pdev,
			       wbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	ol_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf) +
							rx_desc_len), hdr);
	l = get_le32(key);
	r = get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
	l ^= get_le32(hdr);
	michael_block(l, r);
	l ^= get_le32(&hdr[4]);
	michael_block(l, r);
	l ^= get_le32(&hdr[8]);
	michael_block(l, r);
	l ^= get_le32(&hdr[12]);
	michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len + off;
	space = ol_rx_defrag_len(wbuf) - rx_desc_len - off;
	for (;;) {
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= get_le32(data);
			michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		if (data_len < sizeof(uint32_t))
			break;

		wbuf = qdf_nbuf_next(wbuf);
		if (wbuf == NULL)
			return OL_RX_DEFRAG_ERR;

		rx_desc_len = ol_rx_get_desc_len(htt_pdev, wbuf,
						 &rx_desc_old_position);

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
			if ((ol_rx_defrag_len(wbuf) - rx_desc_len) <
			    sizeof(uint32_t) - space) {
				return OL_RX_DEFRAG_ERR;
			}
			switch (space) {
			case 1:
				l ^= get_le32_split(data[0], data_next[0],
						    data_next[1], data_next[2]);
				data = data_next + 3;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 3;
				break;
			case 2:
				l ^= get_le32_split(data[0], data[1],
						    data_next[0], data_next[1]);
				data = data_next + 2;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 2;
				break;
			case 3:
				l ^= get_le32_split(data[0], data[1], data[2],
						    data_next[0]);
				data = data_next + 1;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 1;
				break;
			}
			michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for next buffer.
			 */
			data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
			space = ol_rx_defrag_len(wbuf) - rx_desc_len;
		}
	}
	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	michael_block(l, r);
	michael_block(l, r);
	put_le32(mic, l);
	put_le32(mic + 4, r);

	return OL_RX_DEFRAG_OK;
}

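/*
 * michael_block() and the get/put helpers come from ol_rx_defrag.h. A
 * sketch of one Michael block round, assuming the helpers follow the
 * standard 802.11 Michael definition (this mirrors the reference
 * algorithm, not necessarily the exact macro in the header):
 *
 *	r ^= rol32(l, 17);  l += r;
 *	r ^= xswap(l);      l += r;	(swap bytes within 16-bit halves)
 *	r ^= rol32(l, 3);   l += r;
 *	r ^= ror32(l, 2);   l += r;
 *
 * The final MIC is the 8-byte little-endian concatenation of l and r
 * after the two trailing blocks over the 0x5a padding.
 */
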
/*
 * Calculate the 802.11 header size
 */
uint16_t ol_rx_frag_hdrsize(const void *data)
{
	const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
	uint16_t size = sizeof(struct ieee80211_frame);

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
		size += IEEE80211_ADDR_LEN;

	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
		size += sizeof(uint16_t);
		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
			size += sizeof(struct ieee80211_htc);
	}
	return size;
}

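/*
 * Worked sizes (a sketch, assuming the usual structure layouts:
 * sizeof(struct ieee80211_frame) = 24, IEEE80211_ADDR_LEN = 6, QoS
 * control = 2 bytes, sizeof(struct ieee80211_htc) = 4):
 *
 *	plain 3-address data frame:        24 bytes
 *	QoS data frame:                    24 + 2 = 26 bytes
 *	4-address (WDS) QoS data frame:    24 + 6 + 2 = 32 bytes
 *	QoS + HT control (order bit set):  26 + 4 = 30 bytes
 */
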
/*
 * Recombine and decap fragments
 */
qdf_nbuf_t
ol_rx_defrag_decap_recombine(htt_pdev_handle htt_pdev,
			     qdf_nbuf_t frag_list, uint16_t hdrsize)
{
	qdf_nbuf_t tmp;
	qdf_nbuf_t msdu = frag_list;
	qdf_nbuf_t rx_nbuf = frag_list;
	struct ieee80211_frame *wh;

	msdu = qdf_nbuf_next(msdu);
	qdf_nbuf_set_next(rx_nbuf, NULL);
	while (msdu) {
		htt_rx_msdu_desc_free(htt_pdev, msdu);
		tmp = qdf_nbuf_next(msdu);
		qdf_nbuf_set_next(msdu, NULL);
		ol_rx_frag_pull_hdr(htt_pdev, msdu, hdrsize);
		if (!ol_rx_defrag_concat(rx_nbuf, msdu)) {
			ol_rx_frames_free(htt_pdev, tmp);
			htt_rx_desc_frame_free(htt_pdev, rx_nbuf);
			qdf_nbuf_free(msdu);
			/* msdu rx desc already freed above */
			return NULL;
		}
		msdu = tmp;
	}
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
							      rx_nbuf);
	wh->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG;
	*(uint16_t *) wh->i_seq &= ~IEEE80211_SEQ_FRAG_MASK;

	return rx_nbuf;
}

void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu)
{
	struct ieee80211_frame wh;
	uint32_t hdrsize;
	struct llc_snap_hdr_t llchdr;
	struct ethernet_hdr_t *eth_hdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;
	struct ieee80211_frame *wh_ptr;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	wh_ptr = (struct ieee80211_frame *)(qdf_nbuf_data(msdu) + rx_desc_len);
	qdf_mem_copy(&wh, wh_ptr, sizeof(wh));
	hdrsize = sizeof(struct ieee80211_frame);
	qdf_mem_copy(&llchdr, ((uint8_t *) (qdf_nbuf_data(msdu) +
					    rx_desc_len)) + hdrsize,
		     sizeof(struct llc_snap_hdr_t));

	/*
	 * Now move the data pointer to the beginning of the mac header:
	 * new-header = old-hdr + (wifhdrsize + llchdrsize - ethhdrsize)
	 */
	qdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
				  sizeof(struct llc_snap_hdr_t) -
				  sizeof(struct ethernet_hdr_t)));
	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(msdu));
	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			     IEEE80211_ADDR_LEN);
		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_TODS:
		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr3,
			     IEEE80211_ADDR_LEN);
		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			     IEEE80211_ADDR_LEN);
		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr3, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		break;
	}

	qdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
		     sizeof(llchdr.ethertype));

	ol_rx_defrag_push_rx_desc(msdu, rx_desc_old_position,
				  ind_old_position, rx_desc_len);
}

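/*
 * Header arithmetic for the pull above (a sketch, assuming
 * sizeof(struct ieee80211_frame) = 24, sizeof(struct llc_snap_hdr_t) = 8
 * and sizeof(struct ethernet_hdr_t) = 14): a 3-address 802.11 header
 * plus LLC/SNAP occupies 24 + 8 = 32 bytes, while the replacement 802.3
 * header needs only 14, so 18 bytes (plus the HL rx descriptor, if any)
 * are pulled from the head before the ethernet header is written in
 * place.
 */
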
/*
 * Handling QOS for defragmentation
 */
void
ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev,
		       qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	struct ieee80211_frame *wh;
	uint16_t qoslen;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       nbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	wh = (struct ieee80211_frame *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
		qoslen = sizeof(struct ieee80211_qoscntl);
		/* QoS frame with Order bit set indicates an HTC frame */
		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
			qoslen += sizeof(struct ieee80211_htc);

		/* remove QoS field from header */
		hdrlen -= qoslen;
		qdf_mem_move((uint8_t *) wh + qoslen, wh, hdrlen);
		wh = (struct ieee80211_frame *)qdf_nbuf_pull_head(nbuf,
								  rx_desc_len +
								  qoslen);
		/*
		 * Clear the QoS bit.
		 * KW# 6154 'qdf_nbuf_pull_head' in turn calls
		 * __qdf_nbuf_pull_head,
		 * which returns NULL if there is not sufficient data to pull.
		 * It's guaranteed that qdf_nbuf_pull_head will succeed rather
		 * than returning NULL, since the entire rx frame is already
		 * present in the rx buffer.
		 * However, to make it obvious to static analyzers that this
		 * code is safe, add an explicit check that qdf_nbuf_pull_head
		 * returns a non-NULL value.
		 * Since this part of the code is not performance-critical,
		 * adding this explicit check is okay.
		 */
		if (wh)
			wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;

		ol_rx_defrag_push_rx_desc(nbuf, rx_desc_old_position,
					  ind_old_position, rx_desc_len);
	}
}