Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1 | /* |
Alok Kumar | 4278b69 | 2018-01-11 11:16:53 +0530 | [diff] [blame] | 2 | * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3 | * |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all |
| 7 | * copies. |
| 8 | * |
| 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 10 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 11 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 12 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 13 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 14 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 15 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 16 | * PERFORMANCE OF THIS SOFTWARE. |
| 17 | */ |
| 18 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 19 | /*- |
| 20 | * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting |
| 21 | * All rights reserved. |
| 22 | * |
| 23 | * Redistribution and use in source and binary forms, with or without |
| 24 | * modification, are permitted provided that the following conditions |
| 25 | * are met: |
| 26 | * 1. Redistributions of source code must retain the above copyright |
| 27 | * notice, this list of conditions and the following disclaimer. |
| 28 | * 2. Redistributions in binary form must reproduce the above copyright |
| 29 | * notice, this list of conditions and the following disclaimer in the |
| 30 | * documentation and/or other materials provided with the distribution. |
| 31 | * |
| 32 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| 33 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| 34 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| 35 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 36 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| 37 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 38 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 39 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 40 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| 41 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 42 | */ |
| 43 | #include <ol_htt_api.h> |
| 44 | #include <ol_txrx_api.h> |
| 45 | #include <ol_txrx_htt_api.h> |
| 46 | #include <ol_htt_rx_api.h> |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 47 | #include <ol_rx_reorder.h> |
| 48 | #include <ol_rx_pn.h> |
| 49 | #include <ol_rx_fwd.h> |
| 50 | #include <ol_rx.h> |
| 51 | #include <ol_txrx_internal.h> |
| 52 | #include <ol_ctrl_txrx_api.h> |
| 53 | #include <ol_txrx_peer_find.h> |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 54 | #include <qdf_nbuf.h> |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 55 | #include <qdf_util.h> |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 56 | #include <athdefs.h> |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 57 | #include <qdf_mem.h> |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 58 | #include <ol_rx_defrag.h> |
| 59 | #include <enet.h> |
Anurag Chouhan | 50220ce | 2016-02-18 20:11:33 +0530 | [diff] [blame] | 60 | #include <qdf_time.h> /* qdf_system_time */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 61 | |
/* Compare two 802.11 MAC addresses (IEEE80211_ADDR_LEN bytes) for equality */
#define DEFRAG_IEEE80211_ADDR_EQ(a1, a2) \
	(!qdf_mem_cmp(a1, a2, IEEE80211_ADDR_LEN))

/* Copy an 802.11 MAC address from src to dst */
#define DEFRAG_IEEE80211_ADDR_COPY(dst, src) \
	qdf_mem_copy(dst, src, IEEE80211_ADDR_LEN)

/* True if frame header wh is a QoS data frame (carries a QoS control field) */
#define DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \
	(((wh)->i_fc[0] & \
	  (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
	 (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))

/* Extract the TID from the QoS control field of frame _x */
#define DEFRAG_IEEE80211_QOS_GET_TID(_x) \
	((_x)->i_qos[0] & IEEE80211_QOS_TID)
| 75 | |
/*
 * Per-cipher defragmentation parameters.  From positional use the fields
 * appear to be {name, per-frame crypto header bytes, trailer bytes,
 * MIC bytes}; only the header size (ic_header) is referenced in this
 * file -- confirm the remaining fields against struct ol_rx_defrag_cipher
 * in ol_rx_defrag.h.
 */
const struct ol_rx_defrag_cipher f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

const struct ol_rx_defrag_cipher f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

const struct ol_rx_defrag_cipher f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};
| 96 | |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 97 | #if defined(CONFIG_HL_SUPPORT) |
| 98 | |
| 99 | /** |
| 100 | * ol_rx_frag_get_mac_hdr() - retrieve mac header |
| 101 | * @htt_pdev: pointer to htt pdev handle |
| 102 | * @frag: rx fragment |
| 103 | * |
| 104 | * Return: pointer to ieee mac header of frag |
| 105 | */ |
| 106 | static struct ieee80211_frame *ol_rx_frag_get_mac_hdr( |
| 107 | htt_pdev_handle htt_pdev, qdf_nbuf_t frag) |
| 108 | { |
| 109 | void *rx_desc; |
| 110 | int rx_desc_len; |
| 111 | |
| 112 | rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag); |
| 113 | rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc); |
| 114 | return (struct ieee80211_frame *)(qdf_nbuf_data(frag) + rx_desc_len); |
| 115 | } |
| 116 | |
| 117 | /** |
| 118 | * ol_rx_frag_pull_hdr() - point to payload of rx frag |
| 119 | * @htt_pdev: pointer to htt pdev handle |
| 120 | * @frag: rx fragment |
| 121 | * @hdrsize: header size |
| 122 | * |
| 123 | * Return: None |
| 124 | */ |
| 125 | static void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev, |
| 126 | qdf_nbuf_t frag, int hdrsize) |
| 127 | { |
| 128 | void *rx_desc; |
| 129 | int rx_desc_len; |
| 130 | |
| 131 | rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag); |
| 132 | rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc); |
| 133 | qdf_nbuf_pull_head(frag, rx_desc_len + hdrsize); |
| 134 | } |
| 135 | |
| 136 | /** |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 137 | * ol_rx_frag_desc_adjust() - adjust rx frag descriptor position |
| 138 | * @pdev: pointer to txrx handle |
| 139 | * @msdu: msdu |
| 140 | * @rx_desc_old_position: rx descriptor old position |
| 141 | * @ind_old_position:index of old position |
| 142 | * @rx_desc_len: rx desciptor length |
| 143 | * |
| 144 | * Return: None |
| 145 | */ |
| 146 | static void |
| 147 | ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev, |
| 148 | qdf_nbuf_t msdu, |
| 149 | void **rx_desc_old_position, |
| 150 | void **ind_old_position, int *rx_desc_len) |
| 151 | { |
| 152 | *rx_desc_old_position = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, |
| 153 | msdu); |
| 154 | *ind_old_position = *rx_desc_old_position - HTT_RX_IND_HL_BYTES; |
| 155 | *rx_desc_len = htt_rx_msdu_rx_desc_size_hl(pdev->htt_pdev, |
| 156 | *rx_desc_old_position); |
| 157 | } |
| 158 | |
/**
 * ol_rx_frag_restructure() - shift rx desc/indication over the crypto header
 * @pdev: physical device object
 * @msdu: the buffer containing the MSDU payload
 * @rx_desc_old_position: rx MSDU descriptor (front of the HL buffer)
 * @ind_old_position: rx msdu indication (HTT_RX_IND_HL_BYTES before desc)
 * @f_type: pointing to rx defrag cipher; ic_header gives the shift distance
 * @rx_desc_len: length by which rx descriptor to move
 *
 * Moves the rx descriptor, then the rx indication, forward by the
 * cipher's per-frame header size (presumably so the per-fragment crypto
 * header can be collapsed -- confirm against the HL defrag caller).
 * NOTE(review): the moves use void-pointer arithmetic (GNU extension)
 * and overlap; qdf_mem_move must be memmove-like, and the descriptor
 * must be moved before the indication that precedes it.
 *
 * Return: None
 */
static void
ol_rx_frag_restructure(
	ol_txrx_pdev_handle pdev,
	qdf_nbuf_t msdu,
	void *rx_desc_old_position,
	void *ind_old_position,
	const struct ol_rx_defrag_cipher *f_type,
	int rx_desc_len)
{
	if ((ind_old_position == NULL) || (rx_desc_old_position == NULL)) {
		ol_txrx_err("ind_old_position,rx_desc_old_position is NULL\n");
		ASSERT(0);
		return;
	}
	/* move rx description*/
	qdf_mem_move(rx_desc_old_position + f_type->ic_header,
		     rx_desc_old_position, rx_desc_len);
	/* move rx indication*/
	qdf_mem_move(ind_old_position + f_type->ic_header, ind_old_position,
		     HTT_RX_IND_HL_BYTES);
}
| 191 | |
| 192 | /** |
| 193 | * ol_rx_get_desc_len() - point to payload for HL |
| 194 | * @htt_pdev: the HTT instance the rx data was received on |
| 195 | * @wbuf: buffer containing the MSDU payload |
| 196 | * @rx_desc_old_position: rx MSDU descriptor |
| 197 | * |
| 198 | * Return: Return the HL rx desc size |
| 199 | */ |
| 200 | static |
| 201 | int ol_rx_get_desc_len(htt_pdev_handle htt_pdev, |
| 202 | qdf_nbuf_t wbuf, |
| 203 | void **rx_desc_old_position) |
| 204 | { |
| 205 | int rx_desc_len = 0; |
| 206 | *rx_desc_old_position = htt_rx_msdu_desc_retrieve(htt_pdev, wbuf); |
| 207 | rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, |
| 208 | *rx_desc_old_position); |
| 209 | |
| 210 | return rx_desc_len; |
| 211 | } |
| 212 | |
| 213 | /** |
| 214 | * ol_rx_defrag_push_rx_desc() - point to payload for HL |
| 215 | * @nbuf: buffer containing the MSDU payload |
| 216 | * @rx_desc_old_position: rx MSDU descriptor |
| 217 | * @ind_old_position: rx msdu indication |
| 218 | * @rx_desc_len: HL rx desc size |
| 219 | * |
| 220 | * Return: Return the HL rx desc size |
| 221 | */ |
| 222 | static |
| 223 | void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf, |
| 224 | void *rx_desc_old_position, |
| 225 | void *ind_old_position, |
| 226 | int rx_desc_len) |
| 227 | { |
| 228 | qdf_nbuf_push_head(nbuf, rx_desc_len); |
| 229 | qdf_mem_move( |
| 230 | qdf_nbuf_data(nbuf), rx_desc_old_position, rx_desc_len); |
| 231 | qdf_mem_move( |
| 232 | qdf_nbuf_data(nbuf) - HTT_RX_IND_HL_BYTES, ind_old_position, |
| 233 | HTT_RX_IND_HL_BYTES); |
| 234 | } |
| 235 | #else |
| 236 | |
| 237 | static inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr( |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 238 | htt_pdev_handle htt_pdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 239 | qdf_nbuf_t frag) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 240 | { |
| 241 | return |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 242 | (struct ieee80211_frame *) qdf_nbuf_data(frag); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 243 | } |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 244 | |
| 245 | static inline void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev, |
| 246 | qdf_nbuf_t frag, int hdrsize) |
| 247 | { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 248 | qdf_nbuf_pull_head(frag, hdrsize); |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 249 | } |
| 250 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 251 | static inline void |
| 252 | ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 253 | qdf_nbuf_t msdu, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 254 | void **rx_desc_old_position, |
| 255 | void **ind_old_position, int *rx_desc_len) |
| 256 | { |
| 257 | *rx_desc_old_position = NULL; |
| 258 | *ind_old_position = NULL; |
| 259 | *rx_desc_len = 0; |
| 260 | } |
| 261 | |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 262 | static inline void |
| 263 | ol_rx_frag_restructure( |
| 264 | ol_txrx_pdev_handle pdev, |
| 265 | qdf_nbuf_t msdu, |
| 266 | void *rx_desc_old_position, |
| 267 | void *ind_old_position, |
| 268 | const struct ol_rx_defrag_cipher *f_type, |
| 269 | int rx_desc_len) |
| 270 | { |
| 271 | /* no op */ |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 272 | } |
| 273 | |
| 274 | static inline |
| 275 | int ol_rx_get_desc_len(htt_pdev_handle htt_pdev, |
| 276 | qdf_nbuf_t wbuf, |
| 277 | void **rx_desc_old_position) |
| 278 | { |
| 279 | return 0; |
| 280 | } |
| 281 | |
| 282 | static inline |
| 283 | void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf, |
| 284 | void *rx_desc_old_position, |
| 285 | void *ind_old_position, |
| 286 | int rx_desc_len) |
| 287 | { |
| 288 | return; |
| 289 | } |
| 290 | #endif /* CONFIG_HL_SUPPORT */ |
| 291 | |
/*
 * Process incoming fragments
 *
 * Entry point for an HTT rx-fragment indication: validates the tid,
 * optionally flushes stale fragments (non-full-reorder-offload targets
 * that request a flush), pops the fragment from HTT, and either hands
 * it to the reorder/defrag path (known peer) or discards it.  Finally
 * asks HTT to replenish rx buffers.
 */
void
ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
			      qdf_nbuf_t rx_frag_ind_msg,
			      uint16_t peer_id, uint8_t tid)
{
	uint16_t seq_num;
	uint16_t seq_num_start, seq_num_end;
	struct ol_txrx_peer_t *peer;
	htt_pdev_handle htt_pdev;
	qdf_nbuf_t head_msdu, tail_msdu;
	void *rx_mpdu_desc;
	uint8_t pktlog_bit;
	uint32_t msdu_count = 0;
	int ret;

	/* tid indexes fixed-size per-peer arrays; reject out-of-range values */
	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
		return;
	}

	htt_pdev = pdev->htt_pdev;
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev) &&
	    htt_rx_ind_flush(pdev->htt_pdev, rx_frag_ind_msg) && peer) {
		htt_rx_frag_ind_flush_seq_num_range(pdev->htt_pdev,
						    rx_frag_ind_msg,
						    &seq_num_start,
						    &seq_num_end);
		/*
		 * Assuming flush indication for frags sent from target is
		 * separate from normal frames
		 */
		ol_rx_reorder_flush_frag(htt_pdev, peer, tid, seq_num_start);
	}
	pktlog_bit =
		(htt_rx_amsdu_rx_in_order_get_pktlog(rx_frag_ind_msg) == 0x01);
	ret = htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
			      &tail_msdu, &msdu_count);
	/* Return if msdu pop fails from rx hash table, as recovery
	 * is triggered and we exit gracefully.
	 */
	if (!ret)
		return;
	if (peer) {
		/* a frag indication carries exactly one MSDU (head == tail) */
		qdf_assert(head_msdu == tail_msdu);
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev, head_msdu);
		} else {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev,
							   rx_frag_ind_msg);
		}
		seq_num = htt_rx_mpdu_desc_seq_num(htt_pdev, rx_mpdu_desc);
		OL_RX_ERR_STATISTICS_1(pdev, peer->vdev, peer, rx_mpdu_desc,
				       OL_RX_ERR_NONE_FRAG);
		ol_rx_send_pktlog_event(pdev, peer, head_msdu, pktlog_bit);
		ol_rx_reorder_store_frag(pdev, peer, tid, seq_num, head_msdu);
	} else {
		/* invalid frame - discard it */
		/* advance the HTT descriptor cursor before freeing the frame */
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu);
		else
			htt_rx_mpdu_desc_list_next(htt_pdev, rx_frag_ind_msg);

		ol_rx_send_pktlog_event(pdev, peer, head_msdu, pktlog_bit);
		htt_rx_desc_frame_free(htt_pdev, head_msdu);
	}
	/* request HTT to provide new rx MSDU buffers for the target to fill. */
	htt_rx_msdu_buff_replenish(htt_pdev);
}
| 367 | |
| 368 | /* |
| 369 | * Flushing fragments |
| 370 | */ |
| 371 | void |
| 372 | ol_rx_reorder_flush_frag(htt_pdev_handle htt_pdev, |
Yun Park | 6301812 | 2017-04-06 21:29:19 -0700 | [diff] [blame] | 373 | struct ol_txrx_peer_t *peer, |
Tiger Yu | 62ef4fb | 2017-12-05 14:30:08 +0800 | [diff] [blame] | 374 | unsigned int tid, uint16_t seq_num) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 375 | { |
| 376 | struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem; |
| 377 | int seq; |
| 378 | |
| 379 | seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask; |
| 380 | rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq]; |
| 381 | if (rx_reorder_array_elem->head) { |
| 382 | ol_rx_frames_free(htt_pdev, rx_reorder_array_elem->head); |
| 383 | rx_reorder_array_elem->head = NULL; |
| 384 | rx_reorder_array_elem->tail = NULL; |
| 385 | } |
| 386 | } |
| 387 | |
/*
 * Reorder and store fragments
 *
 * Fragments are always kept at reorder slot 0 for a peer/tid (asserted
 * below).  An unfragmented frame with no pending fraglist is defragged
 * immediately; otherwise the fragment is inserted into the pending
 * fraglist and defrag runs once all fragments are present.  A pending
 * fraglist that does not match the new fragment's sequence number or
 * addresses is treated as stale and dropped.
 */
void
ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
			 struct ol_txrx_peer_t *peer,
			 unsigned int tid, uint16_t seq_num, qdf_nbuf_t frag)
{
	struct ieee80211_frame *fmac_hdr, *mac_hdr;
	uint8_t fragno, more_frag, all_frag_present = 0;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	uint16_t frxseq, rxseq, seq;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	qdf_assert(seq == 0);
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	/* sequence-control field: upper bits = seq number, low 4 = frag no */
	rxseq = qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) >>
		IEEE80211_SEQ_SEQ_SHIFT;
	fragno = qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	more_frag = mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;

	/* fast path: frame is not fragmented and nothing is pending */
	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		rx_reorder_array_elem->head = frag;
		rx_reorder_array_elem->tail = frag;
		qdf_nbuf_set_next(frag, NULL);
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		return;
	}
	if (rx_reorder_array_elem->head) {
		fmac_hdr = (struct ieee80211_frame *)
			ol_rx_frag_get_mac_hdr(htt_pdev,
					       rx_reorder_array_elem->head);
		frxseq = qdf_le16_to_cpu(*(uint16_t *) fmac_hdr->i_seq) >>
			IEEE80211_SEQ_SEQ_SHIFT;
		/* pending fraglist must match seq number and both addresses */
		if (rxseq != frxseq
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr1,
						 fmac_hdr->i_addr1)
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr2,
						 fmac_hdr->i_addr2)) {
			ol_rx_frames_free(htt_pdev,
					  rx_reorder_array_elem->head);
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
			ol_txrx_err("\n ol_rx_reorder_store:%s mismatch\n",
				    (rxseq == frxseq)
				    ? "address"
				    : "seq number");
		}
	}

	ol_rx_fraglist_insert(htt_pdev, &rx_reorder_array_elem->head,
			      &rx_reorder_array_elem->tail, frag,
			      &all_frag_present);

	/* progress was made: take this tid off the timeout waitlist */
	if (pdev->rx.flags.defrag_timeout_check)
		ol_rx_defrag_waitlist_remove(peer, tid);

	if (all_frag_present) {
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		peer->tids_rx_reorder[tid].defrag_timeout_ms = 0;
		peer->tids_last_seq[tid] = seq_num;
	} else if (pdev->rx.flags.defrag_timeout_check) {
		uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

		/* still incomplete: arm the defrag timeout and re-queue */
		peer->tids_rx_reorder[tid].defrag_timeout_ms =
			now_ms + pdev->rx.defrag.timeout_ms;
		ol_rx_defrag_waitlist_add(peer, tid);
	}
}
| 466 | |
/*
 * Insert and store fragments
 *
 * Inserts @frag into the singly linked fraglist (*head_addr/*tail_addr),
 * kept in ascending fragment-number order.  Duplicate fragments are
 * freed.  On return, *all_frag_present is 1 iff the list is a gap-free
 * run of fragments 0..N and the last fragment has the more-frag bit
 * clear; otherwise 0 (except on the empty-list fast path, where it is
 * left untouched -- callers initialize it to 0).
 */
void
ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
		      qdf_nbuf_t *head_addr,
		      qdf_nbuf_t *tail_addr,
		      qdf_nbuf_t frag, uint8_t *all_frag_present)
{
	qdf_nbuf_t next, prev = NULL, cur = *head_addr;
	struct ieee80211_frame *mac_hdr, *cmac_hdr, *next_hdr, *lmac_hdr;
	uint8_t fragno, cur_fragno, lfragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;

	qdf_assert(frag);

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	fragno = qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;

	/* empty list: the fragment becomes both head and tail */
	if (!(*head_addr)) {
		*head_addr = frag;
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		return;
	}
	/* For efficiency, compare with tail first */
	lmac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, *tail_addr);
	lfragno = qdf_le16_to_cpu(*(uint16_t *) lmac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	if (fragno > lfragno) {
		/* highest fragment number so far: append at the tail */
		qdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
	} else {
		/*
		 * Walk from head to the first entry whose fragment number
		 * is >= fragno.  The walk terminates before running off the
		 * list because the tail's fragment number is >= fragno.
		 */
		do {
			cmac_hdr = (struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, cur);
			cur_fragno =
				qdf_le16_to_cpu(*(uint16_t *) cmac_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			prev = cur;
			cur = qdf_nbuf_next(cur);
		} while (fragno > cur_fragno);

		if (fragno == cur_fragno) {
			/* duplicate fragment - drop it */
			htt_rx_desc_frame_free(htt_pdev, frag);
			*all_frag_present = 0;
			return;
		}

		/* splice the fragment in between prev and cur */
		qdf_nbuf_set_next(prev, frag);
		qdf_nbuf_set_next(frag, cur);
	}
	/*
	 * Completeness check.  The list now has at least two entries
	 * (the empty-list case returned above), so 'next' is non-NULL.
	 */
	next = qdf_nbuf_next(*head_addr);
	lmac_hdr = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
								    *tail_addr);
	last_morefrag = lmac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
	if (!last_morefrag) {
		/* final fragment present: verify frags 1..N with no gaps */
		do {
			next_hdr =
				(struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, next);
			next_fragno =
				qdf_le16_to_cpu(*(uint16_t *) next_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			count++;
			if (next_fragno != count)
				break;

			next = qdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return;
		}
	}
	*all_frag_present = 0;
}
| 549 | |
| 550 | /* |
| 551 | * add tid to pending fragment wait list |
| 552 | */ |
Yun Park | 6301812 | 2017-04-06 21:29:19 -0700 | [diff] [blame] | 553 | void ol_rx_defrag_waitlist_add(struct ol_txrx_peer_t *peer, unsigned int tid) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 554 | { |
| 555 | struct ol_txrx_pdev_t *pdev = peer->vdev->pdev; |
| 556 | struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid]; |
| 557 | |
| 558 | TAILQ_INSERT_TAIL(&pdev->rx.defrag.waitlist, rx_reorder, |
| 559 | defrag_waitlist_elem); |
| 560 | } |
| 561 | |
| 562 | /* |
| 563 | * remove tid from pending fragment wait list |
| 564 | */ |
Yun Park | 6301812 | 2017-04-06 21:29:19 -0700 | [diff] [blame] | 565 | void ol_rx_defrag_waitlist_remove(struct ol_txrx_peer_t *peer, unsigned int tid) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 566 | { |
| 567 | struct ol_txrx_pdev_t *pdev = peer->vdev->pdev; |
| 568 | struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid]; |
| 569 | |
DARAM SUDHA | c653bba | 2015-05-14 18:44:59 +0530 | [diff] [blame] | 570 | if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 571 | |
| 572 | TAILQ_REMOVE(&pdev->rx.defrag.waitlist, rx_reorder, |
| 573 | defrag_waitlist_elem); |
| 574 | |
| 575 | rx_reorder->defrag_waitlist_elem.tqe_next = NULL; |
| 576 | rx_reorder->defrag_waitlist_elem.tqe_prev = NULL; |
DARAM SUDHA | 6d0ea36 | 2015-05-16 08:53:02 +0530 | [diff] [blame] | 577 | } else if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) { |
Poddar, Siddarth | 1452179 | 2017-03-14 21:19:42 +0530 | [diff] [blame] | 578 | ol_txrx_alert("waitlist->tqe_prv = NULL\n"); |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 579 | QDF_ASSERT(0); |
DARAM SUDHA | c653bba | 2015-05-14 18:44:59 +0530 | [diff] [blame] | 580 | rx_reorder->defrag_waitlist_elem.tqe_next = NULL; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 581 | } |
| 582 | } |
| 583 | |
#ifndef container_of
/*
 * Fallback definition: recover a pointer to the enclosing structure of
 * type 'type' from a pointer 'ptr' to its member 'member'.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
#endif
| 588 | |
/*
 * flush stale fragments from the waitlist
 *
 * Walks the pdev defrag waitlist and discards the pending fraglist of
 * every entry whose defrag timeout has expired.  Entries are appended
 * with now + a fixed pdev timeout, so they are expected to expire in
 * insertion order; iteration stops at the first unexpired entry.
 */
void ol_rx_defrag_waitlist_flush(struct ol_txrx_pdev_t *pdev)
{
	struct ol_rx_reorder_t *rx_reorder, *tmp;
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

	TAILQ_FOREACH_SAFE(rx_reorder, &pdev->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		struct ol_txrx_peer_t *peer;
		struct ol_rx_reorder_t *rx_reorder_base;
		unsigned int tid;

		/* list is time-ordered: first unexpired entry ends the scan */
		if (rx_reorder->defrag_timeout_ms > now_ms)
			break;

		tid = rx_reorder->tid;
		if (tid >= OL_TXRX_NUM_EXT_TIDS) {
			ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
			WARN_ON(1);
			continue;
		}
		/* get index 0 of the rx_reorder array */
		rx_reorder_base = rx_reorder - tid;
		/* recover the owning peer from its embedded reorder array */
		peer =
			container_of(rx_reorder_base, struct ol_txrx_peer_t,
				     tids_rx_reorder[0]);

		ol_rx_defrag_waitlist_remove(peer, tid);
		ol_rx_reorder_flush_frag(pdev->htt_pdev, peer, tid,
					 0 /* frags always stored at seq 0 */);
	}
}
| 623 | |
| 624 | /* |
| 625 | * Handling security checking and processing fragments |
| 626 | */ |
| 627 | void |
| 628 | ol_rx_defrag(ol_txrx_pdev_handle pdev, |
Yun Park | 6301812 | 2017-04-06 21:29:19 -0700 | [diff] [blame] | 629 | struct ol_txrx_peer_t *peer, unsigned int tid, |
| 630 | qdf_nbuf_t frag_list) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 631 | { |
| 632 | struct ol_txrx_vdev_t *vdev = NULL; |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 633 | qdf_nbuf_t tmp_next, msdu, prev = NULL, cur = frag_list; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 634 | uint8_t index, tkip_demic = 0; |
| 635 | uint16_t hdr_space; |
| 636 | void *rx_desc; |
| 637 | struct ieee80211_frame *wh; |
| 638 | uint8_t key[DEFRAG_IEEE80211_KEY_LEN]; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 639 | htt_pdev_handle htt_pdev = pdev->htt_pdev; |
Yun Park | 6301812 | 2017-04-06 21:29:19 -0700 | [diff] [blame] | 640 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 641 | vdev = peer->vdev; |
| 642 | |
| 643 | /* bypass defrag for safe mode */ |
| 644 | if (vdev->safemode) { |
| 645 | if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) |
| 646 | ol_rx_in_order_deliver(vdev, peer, tid, frag_list); |
| 647 | else |
| 648 | ol_rx_deliver(vdev, peer, tid, frag_list); |
| 649 | return; |
| 650 | } |
| 651 | |
| 652 | while (cur) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 653 | tmp_next = qdf_nbuf_next(cur); |
| 654 | qdf_nbuf_set_next(cur, NULL); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 655 | if (!ol_rx_pn_check_base(vdev, peer, tid, cur)) { |
| 656 | /* PN check failed,discard frags */ |
| 657 | if (prev) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 658 | qdf_nbuf_set_next(prev, NULL); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 659 | ol_rx_frames_free(htt_pdev, frag_list); |
| 660 | } |
| 661 | ol_rx_frames_free(htt_pdev, tmp_next); |
Poddar, Siddarth | 1452179 | 2017-03-14 21:19:42 +0530 | [diff] [blame] | 662 | ol_txrx_err("ol_rx_defrag: PN Check failed\n"); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 663 | return; |
| 664 | } |
| 665 | /* remove FCS from each fragment */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 666 | qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 667 | prev = cur; |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 668 | qdf_nbuf_set_next(cur, tmp_next); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 669 | cur = tmp_next; |
| 670 | } |
| 671 | cur = frag_list; |
| 672 | wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, cur); |
| 673 | hdr_space = ol_rx_frag_hdrsize(wh); |
| 674 | rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag_list); |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 675 | qdf_assert(htt_rx_msdu_has_wlan_mcast_flag(htt_pdev, rx_desc)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 676 | index = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc) ? |
| 677 | txrx_sec_mcast : txrx_sec_ucast; |
| 678 | |
| 679 | switch (peer->security[index].sec_type) { |
| 680 | case htt_sec_type_tkip: |
| 681 | tkip_demic = 1; |
| 682 | /* fall-through to rest of tkip ops */ |
| 683 | case htt_sec_type_tkip_nomic: |
| 684 | while (cur) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 685 | tmp_next = qdf_nbuf_next(cur); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 686 | if (!ol_rx_frag_tkip_decap(pdev, cur, hdr_space)) { |
| 687 | /* TKIP decap failed, discard frags */ |
| 688 | ol_rx_frames_free(htt_pdev, frag_list); |
Poddar, Siddarth | 1452179 | 2017-03-14 21:19:42 +0530 | [diff] [blame] | 689 | ol_txrx_err("\n ol_rx_defrag: TKIP decap failed\n"); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 690 | return; |
| 691 | } |
| 692 | cur = tmp_next; |
| 693 | } |
| 694 | break; |
| 695 | |
| 696 | case htt_sec_type_aes_ccmp: |
| 697 | while (cur) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 698 | tmp_next = qdf_nbuf_next(cur); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 699 | if (!ol_rx_frag_ccmp_demic(pdev, cur, hdr_space)) { |
| 700 | /* CCMP demic failed, discard frags */ |
| 701 | ol_rx_frames_free(htt_pdev, frag_list); |
Poddar, Siddarth | 1452179 | 2017-03-14 21:19:42 +0530 | [diff] [blame] | 702 | ol_txrx_err("\n ol_rx_defrag: CCMP demic failed\n"); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 703 | return; |
| 704 | } |
| 705 | if (!ol_rx_frag_ccmp_decap(pdev, cur, hdr_space)) { |
| 706 | /* CCMP decap failed, discard frags */ |
| 707 | ol_rx_frames_free(htt_pdev, frag_list); |
Poddar, Siddarth | 1452179 | 2017-03-14 21:19:42 +0530 | [diff] [blame] | 708 | ol_txrx_err("\n ol_rx_defrag: CCMP decap failed\n"); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 709 | return; |
| 710 | } |
| 711 | cur = tmp_next; |
| 712 | } |
| 713 | break; |
| 714 | |
| 715 | case htt_sec_type_wep40: |
| 716 | case htt_sec_type_wep104: |
| 717 | case htt_sec_type_wep128: |
| 718 | while (cur) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 719 | tmp_next = qdf_nbuf_next(cur); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 720 | if (!ol_rx_frag_wep_decap(pdev, cur, hdr_space)) { |
| 721 | /* wep decap failed, discard frags */ |
| 722 | ol_rx_frames_free(htt_pdev, frag_list); |
Poddar, Siddarth | 1452179 | 2017-03-14 21:19:42 +0530 | [diff] [blame] | 723 | ol_txrx_err("\n ol_rx_defrag: wep decap failed\n"); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 724 | return; |
| 725 | } |
| 726 | cur = tmp_next; |
| 727 | } |
| 728 | break; |
| 729 | |
| 730 | default: |
| 731 | break; |
| 732 | } |
| 733 | |
| 734 | msdu = ol_rx_defrag_decap_recombine(htt_pdev, frag_list, hdr_space); |
| 735 | if (!msdu) |
| 736 | return; |
| 737 | |
| 738 | if (tkip_demic) { |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 739 | qdf_mem_copy(key, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 740 | peer->security[index].michael_key, |
| 741 | sizeof(peer->security[index].michael_key)); |
| 742 | if (!ol_rx_frag_tkip_demic(pdev, key, msdu, hdr_space)) { |
| 743 | htt_rx_desc_frame_free(htt_pdev, msdu); |
| 744 | ol_rx_err(pdev->ctrl_pdev, |
| 745 | vdev->vdev_id, peer->mac_addr.raw, tid, 0, |
| 746 | OL_RX_DEFRAG_ERR, msdu, NULL, 0); |
Poddar, Siddarth | 1452179 | 2017-03-14 21:19:42 +0530 | [diff] [blame] | 747 | ol_txrx_err("\n ol_rx_defrag: TKIP demic failed\n"); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 748 | return; |
| 749 | } |
| 750 | } |
| 751 | wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, msdu); |
| 752 | if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) |
| 753 | ol_rx_defrag_qos_decap(pdev, msdu, hdr_space); |
| 754 | if (ol_cfg_frame_type(pdev->ctrl_pdev) == wlan_frm_fmt_802_3) |
| 755 | ol_rx_defrag_nwifi_to_8023(pdev, msdu); |
| 756 | |
| 757 | ol_rx_fwd_check(vdev, peer, tid, msdu); |
| 758 | } |
| 759 | |
| 760 | /* |
| 761 | * Handling TKIP processing for defragmentation |
| 762 | */ |
| 763 | int |
| 764 | ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 765 | qdf_nbuf_t msdu, uint16_t hdrlen) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 766 | { |
| 767 | uint8_t *ivp, *origHdr; |
| 768 | |
| 769 | void *rx_desc_old_position = NULL; |
| 770 | void *ind_old_position = NULL; |
| 771 | int rx_desc_len = 0; |
| 772 | |
| 773 | ol_rx_frag_desc_adjust(pdev, |
| 774 | msdu, |
| 775 | &rx_desc_old_position, |
| 776 | &ind_old_position, &rx_desc_len); |
| 777 | /* Header should have extended IV */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 778 | origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 779 | |
| 780 | ivp = origHdr + hdrlen; |
| 781 | if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) |
| 782 | return OL_RX_DEFRAG_ERR; |
| 783 | |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 784 | qdf_mem_move(origHdr + f_tkip.ic_header, origHdr, hdrlen); |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 785 | ol_rx_frag_restructure( |
| 786 | pdev, |
| 787 | msdu, |
| 788 | rx_desc_old_position, |
| 789 | ind_old_position, |
| 790 | &f_tkip, |
| 791 | rx_desc_len); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 792 | qdf_nbuf_pull_head(msdu, f_tkip.ic_header); |
| 793 | qdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 794 | return OL_RX_DEFRAG_OK; |
| 795 | } |
| 796 | |
| 797 | /* |
| 798 | * Handling WEP processing for defragmentation |
| 799 | */ |
| 800 | int |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 801 | ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t hdrlen) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 802 | { |
| 803 | uint8_t *origHdr; |
| 804 | void *rx_desc_old_position = NULL; |
| 805 | void *ind_old_position = NULL; |
| 806 | int rx_desc_len = 0; |
| 807 | |
| 808 | ol_rx_frag_desc_adjust(pdev, |
| 809 | msdu, |
| 810 | &rx_desc_old_position, |
| 811 | &ind_old_position, &rx_desc_len); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 812 | origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 813 | qdf_mem_move(origHdr + f_wep.ic_header, origHdr, hdrlen); |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 814 | ol_rx_frag_restructure( |
| 815 | pdev, |
| 816 | msdu, |
| 817 | rx_desc_old_position, |
| 818 | ind_old_position, |
| 819 | &f_wep, |
| 820 | rx_desc_len); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 821 | qdf_nbuf_pull_head(msdu, f_wep.ic_header); |
| 822 | qdf_nbuf_trim_tail(msdu, f_wep.ic_trailer); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 823 | return OL_RX_DEFRAG_OK; |
| 824 | } |
| 825 | |
| 826 | /* |
| 827 | * Verify and strip MIC from the frame. |
| 828 | */ |
| 829 | int |
| 830 | ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev, const uint8_t *key, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 831 | qdf_nbuf_t msdu, uint16_t hdrlen) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 832 | { |
| 833 | int status; |
| 834 | uint32_t pktlen; |
| 835 | uint8_t mic[IEEE80211_WEP_MICLEN]; |
| 836 | uint8_t mic0[IEEE80211_WEP_MICLEN]; |
| 837 | void *rx_desc_old_position = NULL; |
| 838 | void *ind_old_position = NULL; |
| 839 | int rx_desc_len = 0; |
| 840 | |
| 841 | ol_rx_frag_desc_adjust(pdev, |
| 842 | msdu, |
| 843 | &rx_desc_old_position, |
| 844 | &ind_old_position, &rx_desc_len); |
| 845 | |
| 846 | pktlen = ol_rx_defrag_len(msdu) - rx_desc_len; |
| 847 | |
| 848 | status = ol_rx_defrag_mic(pdev, key, msdu, hdrlen, |
| 849 | pktlen - (hdrlen + f_tkip.ic_miclen), mic); |
| 850 | if (status != OL_RX_DEFRAG_OK) |
| 851 | return OL_RX_DEFRAG_ERR; |
| 852 | |
| 853 | ol_rx_defrag_copydata(msdu, pktlen - f_tkip.ic_miclen + rx_desc_len, |
| 854 | f_tkip.ic_miclen, (caddr_t) mic0); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 855 | if (!qdf_mem_cmp(mic, mic0, f_tkip.ic_miclen)) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 856 | return OL_RX_DEFRAG_ERR; |
| 857 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 858 | qdf_nbuf_trim_tail(msdu, f_tkip.ic_miclen); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 859 | return OL_RX_DEFRAG_OK; |
| 860 | } |
| 861 | |
| 862 | /* |
| 863 | * Handling CCMP processing for defragmentation |
| 864 | */ |
| 865 | int |
| 866 | ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 867 | qdf_nbuf_t nbuf, uint16_t hdrlen) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 868 | { |
| 869 | uint8_t *ivp, *origHdr; |
| 870 | void *rx_desc_old_position = NULL; |
| 871 | void *ind_old_position = NULL; |
| 872 | int rx_desc_len = 0; |
| 873 | |
| 874 | ol_rx_frag_desc_adjust(pdev, |
| 875 | nbuf, |
| 876 | &rx_desc_old_position, |
| 877 | &ind_old_position, &rx_desc_len); |
| 878 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 879 | origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 880 | ivp = origHdr + hdrlen; |
| 881 | if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) |
| 882 | return OL_RX_DEFRAG_ERR; |
| 883 | |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 884 | qdf_mem_move(origHdr + f_ccmp.ic_header, origHdr, hdrlen); |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 885 | ol_rx_frag_restructure( |
| 886 | pdev, |
| 887 | nbuf, |
| 888 | rx_desc_old_position, |
| 889 | ind_old_position, |
| 890 | &f_ccmp, |
| 891 | rx_desc_len); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 892 | qdf_nbuf_pull_head(nbuf, f_ccmp.ic_header); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 893 | |
| 894 | return OL_RX_DEFRAG_OK; |
| 895 | } |
| 896 | |
| 897 | /* |
| 898 | * Verify and strip MIC from the frame. |
| 899 | */ |
| 900 | int |
| 901 | ol_rx_frag_ccmp_demic(ol_txrx_pdev_handle pdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 902 | qdf_nbuf_t wbuf, uint16_t hdrlen) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 903 | { |
| 904 | uint8_t *ivp, *origHdr; |
| 905 | void *rx_desc_old_position = NULL; |
| 906 | void *ind_old_position = NULL; |
| 907 | int rx_desc_len = 0; |
| 908 | |
| 909 | ol_rx_frag_desc_adjust(pdev, |
| 910 | wbuf, |
| 911 | &rx_desc_old_position, |
| 912 | &ind_old_position, &rx_desc_len); |
| 913 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 914 | origHdr = (uint8_t *) (qdf_nbuf_data(wbuf) + rx_desc_len); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 915 | |
| 916 | ivp = origHdr + hdrlen; |
| 917 | if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) |
| 918 | return OL_RX_DEFRAG_ERR; |
| 919 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 920 | qdf_nbuf_trim_tail(wbuf, f_ccmp.ic_trailer); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 921 | |
| 922 | return OL_RX_DEFRAG_OK; |
| 923 | } |
| 924 | |
| 925 | /* |
| 926 | * Craft pseudo header used to calculate the MIC. |
| 927 | */ |
| 928 | void ol_rx_defrag_michdr(const struct ieee80211_frame *wh0, uint8_t hdr[]) |
| 929 | { |
| 930 | const struct ieee80211_frame_addr4 *wh = |
| 931 | (const struct ieee80211_frame_addr4 *)wh0; |
| 932 | |
| 933 | switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) { |
| 934 | case IEEE80211_FC1_DIR_NODS: |
| 935 | DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ |
| 936 | DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, |
| 937 | wh->i_addr2); |
| 938 | break; |
| 939 | case IEEE80211_FC1_DIR_TODS: |
| 940 | DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ |
| 941 | DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, |
| 942 | wh->i_addr2); |
| 943 | break; |
| 944 | case IEEE80211_FC1_DIR_FROMDS: |
| 945 | DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ |
| 946 | DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, |
| 947 | wh->i_addr3); |
| 948 | break; |
| 949 | case IEEE80211_FC1_DIR_DSTODS: |
| 950 | DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ |
| 951 | DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, |
| 952 | wh->i_addr4); |
| 953 | break; |
| 954 | } |
| 955 | /* |
| 956 | * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but |
| 957 | * it could also be set for deauth, disassoc, action, etc. for |
| 958 | * a mgt type frame. It comes into picture for MFP. |
| 959 | */ |
| 960 | if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { |
| 961 | const struct ieee80211_qosframe *qwh = |
| 962 | (const struct ieee80211_qosframe *)wh; |
| 963 | hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID; |
| 964 | } else { |
| 965 | hdr[12] = 0; |
| 966 | } |
| 967 | hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ |
| 968 | } |
| 969 | |
| 970 | /* |
| 971 | * Michael_mic for defragmentation |
| 972 | */ |
| 973 | int |
| 974 | ol_rx_defrag_mic(ol_txrx_pdev_handle pdev, |
| 975 | const uint8_t *key, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 976 | qdf_nbuf_t wbuf, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 977 | uint16_t off, uint16_t data_len, uint8_t mic[]) |
| 978 | { |
| 979 | uint8_t hdr[16] = { 0, }; |
| 980 | uint32_t l, r; |
| 981 | const uint8_t *data; |
| 982 | uint32_t space; |
| 983 | void *rx_desc_old_position = NULL; |
| 984 | void *ind_old_position = NULL; |
| 985 | int rx_desc_len = 0; |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 986 | htt_pdev_handle htt_pdev = pdev->htt_pdev; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 987 | |
| 988 | ol_rx_frag_desc_adjust(pdev, |
| 989 | wbuf, |
| 990 | &rx_desc_old_position, |
| 991 | &ind_old_position, &rx_desc_len); |
| 992 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 993 | ol_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf) + |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 994 | rx_desc_len), hdr); |
| 995 | l = get_le32(key); |
| 996 | r = get_le32(key + 4); |
| 997 | |
| 998 | /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */ |
| 999 | l ^= get_le32(hdr); |
| 1000 | michael_block(l, r); |
| 1001 | l ^= get_le32(&hdr[4]); |
| 1002 | michael_block(l, r); |
| 1003 | l ^= get_le32(&hdr[8]); |
| 1004 | michael_block(l, r); |
| 1005 | l ^= get_le32(&hdr[12]); |
| 1006 | michael_block(l, r); |
| 1007 | |
| 1008 | /* first buffer has special handling */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1009 | data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len + off; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1010 | space = ol_rx_defrag_len(wbuf) - rx_desc_len - off; |
| 1011 | for (;; ) { |
| 1012 | if (space > data_len) |
| 1013 | space = data_len; |
| 1014 | |
| 1015 | /* collect 32-bit blocks from current buffer */ |
| 1016 | while (space >= sizeof(uint32_t)) { |
| 1017 | l ^= get_le32(data); |
| 1018 | michael_block(l, r); |
| 1019 | data += sizeof(uint32_t); |
| 1020 | space -= sizeof(uint32_t); |
| 1021 | data_len -= sizeof(uint32_t); |
| 1022 | } |
| 1023 | if (data_len < sizeof(uint32_t)) |
| 1024 | break; |
| 1025 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1026 | wbuf = qdf_nbuf_next(wbuf); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1027 | if (wbuf == NULL) |
| 1028 | return OL_RX_DEFRAG_ERR; |
| 1029 | |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 1030 | rx_desc_len = ol_rx_get_desc_len(htt_pdev, wbuf, |
| 1031 | &rx_desc_old_position); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1032 | |
| 1033 | if (space != 0) { |
| 1034 | const uint8_t *data_next; |
| 1035 | /* |
| 1036 | * Block straddles buffers, split references. |
| 1037 | */ |
| 1038 | data_next = |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1039 | (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1040 | if ((ol_rx_defrag_len(wbuf) - rx_desc_len) < |
| 1041 | sizeof(uint32_t) - space) { |
| 1042 | return OL_RX_DEFRAG_ERR; |
| 1043 | } |
| 1044 | switch (space) { |
| 1045 | case 1: |
| 1046 | l ^= get_le32_split(data[0], data_next[0], |
| 1047 | data_next[1], data_next[2]); |
| 1048 | data = data_next + 3; |
| 1049 | space = (ol_rx_defrag_len(wbuf) - rx_desc_len) |
| 1050 | - 3; |
| 1051 | break; |
| 1052 | case 2: |
| 1053 | l ^= get_le32_split(data[0], data[1], |
| 1054 | data_next[0], data_next[1]); |
| 1055 | data = data_next + 2; |
| 1056 | space = (ol_rx_defrag_len(wbuf) - rx_desc_len) |
| 1057 | - 2; |
| 1058 | break; |
| 1059 | case 3: |
| 1060 | l ^= get_le32_split(data[0], data[1], data[2], |
| 1061 | data_next[0]); |
| 1062 | data = data_next + 1; |
| 1063 | space = (ol_rx_defrag_len(wbuf) - rx_desc_len) |
| 1064 | - 1; |
| 1065 | break; |
| 1066 | } |
| 1067 | michael_block(l, r); |
| 1068 | data_len -= sizeof(uint32_t); |
| 1069 | } else { |
| 1070 | /* |
| 1071 | * Setup for next buffer. |
| 1072 | */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1073 | data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1074 | space = ol_rx_defrag_len(wbuf) - rx_desc_len; |
| 1075 | } |
| 1076 | } |
| 1077 | /* Last block and padding (0x5a, 4..7 x 0) */ |
| 1078 | switch (data_len) { |
| 1079 | case 0: |
| 1080 | l ^= get_le32_split(0x5a, 0, 0, 0); |
| 1081 | break; |
| 1082 | case 1: |
| 1083 | l ^= get_le32_split(data[0], 0x5a, 0, 0); |
| 1084 | break; |
| 1085 | case 2: |
| 1086 | l ^= get_le32_split(data[0], data[1], 0x5a, 0); |
| 1087 | break; |
| 1088 | case 3: |
| 1089 | l ^= get_le32_split(data[0], data[1], data[2], 0x5a); |
| 1090 | break; |
| 1091 | } |
| 1092 | michael_block(l, r); |
| 1093 | michael_block(l, r); |
| 1094 | put_le32(mic, l); |
| 1095 | put_le32(mic + 4, r); |
| 1096 | |
| 1097 | return OL_RX_DEFRAG_OK; |
| 1098 | } |
| 1099 | |
| 1100 | /* |
| 1101 | * Calculate headersize |
| 1102 | */ |
| 1103 | uint16_t ol_rx_frag_hdrsize(const void *data) |
| 1104 | { |
| 1105 | const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data; |
| 1106 | uint16_t size = sizeof(struct ieee80211_frame); |
| 1107 | |
| 1108 | if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS) |
| 1109 | size += IEEE80211_ADDR_LEN; |
| 1110 | |
| 1111 | if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) { |
| 1112 | size += sizeof(uint16_t); |
| 1113 | if (wh->i_fc[1] & IEEE80211_FC1_ORDER) |
| 1114 | size += sizeof(struct ieee80211_htc); |
| 1115 | } |
| 1116 | return size; |
| 1117 | } |
| 1118 | |
| 1119 | /* |
| 1120 | * Recombine and decap fragments |
| 1121 | */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1122 | qdf_nbuf_t |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1123 | ol_rx_defrag_decap_recombine(htt_pdev_handle htt_pdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1124 | qdf_nbuf_t frag_list, uint16_t hdrsize) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1125 | { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1126 | qdf_nbuf_t tmp; |
| 1127 | qdf_nbuf_t msdu = frag_list; |
| 1128 | qdf_nbuf_t rx_nbuf = frag_list; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1129 | struct ieee80211_frame *wh; |
| 1130 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1131 | msdu = qdf_nbuf_next(msdu); |
| 1132 | qdf_nbuf_set_next(rx_nbuf, NULL); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1133 | while (msdu) { |
| 1134 | htt_rx_msdu_desc_free(htt_pdev, msdu); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1135 | tmp = qdf_nbuf_next(msdu); |
| 1136 | qdf_nbuf_set_next(msdu, NULL); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1137 | ol_rx_frag_pull_hdr(htt_pdev, msdu, hdrsize); |
| 1138 | if (!ol_rx_defrag_concat(rx_nbuf, msdu)) { |
| 1139 | ol_rx_frames_free(htt_pdev, tmp); |
| 1140 | htt_rx_desc_frame_free(htt_pdev, rx_nbuf); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1141 | qdf_nbuf_free(msdu); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1142 | /* msdu rx desc already freed above */ |
| 1143 | return NULL; |
| 1144 | } |
| 1145 | msdu = tmp; |
| 1146 | } |
| 1147 | wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, |
| 1148 | rx_nbuf); |
| 1149 | wh->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG; |
| 1150 | *(uint16_t *) wh->i_seq &= ~IEEE80211_SEQ_FRAG_MASK; |
| 1151 | |
| 1152 | return rx_nbuf; |
| 1153 | } |
| 1154 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1155 | void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1156 | { |
| 1157 | struct ieee80211_frame wh; |
| 1158 | uint32_t hdrsize; |
| 1159 | struct llc_snap_hdr_t llchdr; |
| 1160 | struct ethernet_hdr_t *eth_hdr; |
| 1161 | void *rx_desc_old_position = NULL; |
| 1162 | void *ind_old_position = NULL; |
| 1163 | int rx_desc_len = 0; |
| 1164 | struct ieee80211_frame *wh_ptr; |
| 1165 | |
| 1166 | ol_rx_frag_desc_adjust(pdev, |
| 1167 | msdu, |
| 1168 | &rx_desc_old_position, |
| 1169 | &ind_old_position, &rx_desc_len); |
| 1170 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1171 | wh_ptr = (struct ieee80211_frame *)(qdf_nbuf_data(msdu) + rx_desc_len); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 1172 | qdf_mem_copy(&wh, wh_ptr, sizeof(wh)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1173 | hdrsize = sizeof(struct ieee80211_frame); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1174 | qdf_mem_copy(&llchdr, ((uint8_t *) (qdf_nbuf_data(msdu) + |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1175 | rx_desc_len)) + hdrsize, |
| 1176 | sizeof(struct llc_snap_hdr_t)); |
| 1177 | |
| 1178 | /* |
| 1179 | * Now move the data pointer to the beginning of the mac header : |
| 1180 | * new-header = old-hdr + (wifhdrsize + llchdrsize - ethhdrsize) |
| 1181 | */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1182 | qdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize + |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1183 | sizeof(struct llc_snap_hdr_t) - |
| 1184 | sizeof(struct ethernet_hdr_t))); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1185 | eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(msdu)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1186 | switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) { |
| 1187 | case IEEE80211_FC1_DIR_NODS: |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 1188 | qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1189 | IEEE80211_ADDR_LEN); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 1190 | qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1191 | break; |
| 1192 | case IEEE80211_FC1_DIR_TODS: |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 1193 | qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr3, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1194 | IEEE80211_ADDR_LEN); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 1195 | qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1196 | break; |
| 1197 | case IEEE80211_FC1_DIR_FROMDS: |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 1198 | qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1199 | IEEE80211_ADDR_LEN); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 1200 | qdf_mem_copy(eth_hdr->src_addr, wh.i_addr3, IEEE80211_ADDR_LEN); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1201 | break; |
| 1202 | case IEEE80211_FC1_DIR_DSTODS: |
| 1203 | break; |
| 1204 | } |
| 1205 | |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 1206 | qdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1207 | sizeof(llchdr.ethertype)); |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 1208 | |
| 1209 | ol_rx_defrag_push_rx_desc(msdu, rx_desc_old_position, |
| 1210 | ind_old_position, rx_desc_len); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1211 | } |
| 1212 | |
| 1213 | /* |
| 1214 | * Handling QOS for defragmentation |
| 1215 | */ |
| 1216 | void |
| 1217 | ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1218 | qdf_nbuf_t nbuf, uint16_t hdrlen) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1219 | { |
| 1220 | struct ieee80211_frame *wh; |
| 1221 | uint16_t qoslen; |
| 1222 | void *rx_desc_old_position = NULL; |
| 1223 | void *ind_old_position = NULL; |
| 1224 | int rx_desc_len = 0; |
| 1225 | |
| 1226 | ol_rx_frag_desc_adjust(pdev, |
| 1227 | nbuf, |
| 1228 | &rx_desc_old_position, |
| 1229 | &ind_old_position, &rx_desc_len); |
| 1230 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1231 | wh = (struct ieee80211_frame *)(qdf_nbuf_data(nbuf) + rx_desc_len); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1232 | if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) { |
| 1233 | qoslen = sizeof(struct ieee80211_qoscntl); |
| 1234 | /* Qos frame with Order bit set indicates a HTC frame */ |
| 1235 | if (wh->i_fc[1] & IEEE80211_FC1_ORDER) |
| 1236 | qoslen += sizeof(struct ieee80211_htc); |
| 1237 | |
| 1238 | /* remove QoS filed from header */ |
| 1239 | hdrlen -= qoslen; |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 1240 | qdf_mem_move((uint8_t *) wh + qoslen, wh, hdrlen); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1241 | wh = (struct ieee80211_frame *)qdf_nbuf_pull_head(nbuf, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1242 | rx_desc_len + |
| 1243 | qoslen); |
| 1244 | /* clear QoS bit */ |
| 1245 | /* |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1246 | * KW# 6154 'qdf_nbuf_pull_head' in turn calls |
| 1247 | * __qdf_nbuf_pull_head, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1248 | * which returns NULL if there is not sufficient data to pull. |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1249 | * It's guaranteed that qdf_nbuf_pull_head will succeed rather |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1250 | * than returning NULL, since the entire rx frame is already |
| 1251 | * present in the rx buffer. |
| 1252 | * However, to make it obvious to static analyzers that this |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1253 | * code is safe, add an explicit check that qdf_nbuf_pull_head |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1254 | * returns a non-NULL value. |
| 1255 | * Since this part of the code is not performance-critical, |
| 1256 | * adding this explicit check is okay. |
| 1257 | */ |
| 1258 | if (wh) |
| 1259 | wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS; |
| 1260 | |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 1261 | ol_rx_defrag_push_rx_desc(nbuf, rx_desc_old_position, |
| 1262 | ind_old_position, rx_desc_len); |
| 1263 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1264 | } |
| 1265 | } |