Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1 | /* |
Anurag Chouhan | 50220ce | 2016-02-18 20:11:33 +0530 | [diff] [blame] | 2 | * Copyright (c) 2011, 2013-2016 The Linux Foundation. All rights reserved. |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3 | * |
| 4 | * Previously licensed under the ISC license by Qualcomm Atheros, Inc. |
| 5 | * |
| 6 | * |
| 7 | * Permission to use, copy, modify, and/or distribute this software for |
| 8 | * any purpose with or without fee is hereby granted, provided that the |
| 9 | * above copyright notice and this permission notice appear in all |
| 10 | * copies. |
| 11 | * |
| 12 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 13 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 15 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 16 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 17 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 18 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 19 | * PERFORMANCE OF THIS SOFTWARE. |
| 20 | */ |
| 21 | |
| 22 | /* |
| 23 | * This file was originally distributed by Qualcomm Atheros, Inc. |
| 24 | * under proprietary terms before Copyright ownership was assigned |
| 25 | * to the Linux Foundation. |
| 26 | */ |
| 27 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 28 | #include <qdf_nbuf.h> /* qdf_nbuf_t */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 29 | |
| 30 | #include <ol_htt_rx_api.h> /* htt_rx_pn_t, etc. */ |
| 31 | #include <ol_ctrl_txrx_api.h> /* ol_rx_err */ |
| 32 | |
| 33 | #include <ol_txrx_internal.h> /* ol_rx_mpdu_list_next */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 34 | #include <ol_rx_pn.h> /* our own defs */ |
| 35 | #include <ol_rx_fwd.h> /* ol_rx_fwd_check */ |
| 36 | #include <ol_rx.h> /* ol_rx_deliver */ |
| 37 | |
/*
 * Append one MPDU's chain of MSDUs to the output list of frames that
 * passed the PN check.  "tail" advances to the MPDU's last MSDU so the
 * next append links after the entire chain.
 */
#define ADD_MPDU_TO_LIST(head, tail, mpdu, mpdu_tail) do { \
	if (head) { \
		qdf_nbuf_set_next(tail, mpdu); \
	} else { \
		head = mpdu; \
	} \
	tail = mpdu_tail; \
} while (0)
| 47 | |
| 48 | int ol_rx_pn_cmp24(union htt_rx_pn_t *new_pn, |
| 49 | union htt_rx_pn_t *old_pn, int is_unicast, int opmode) |
| 50 | { |
| 51 | int rc = ((new_pn->pn24 & 0xffffff) <= (old_pn->pn24 & 0xffffff)); |
| 52 | return rc; |
| 53 | } |
| 54 | |
| 55 | int ol_rx_pn_cmp48(union htt_rx_pn_t *new_pn, |
| 56 | union htt_rx_pn_t *old_pn, int is_unicast, int opmode) |
| 57 | { |
| 58 | int rc = ((new_pn->pn48 & 0xffffffffffffULL) <= |
| 59 | (old_pn->pn48 & 0xffffffffffffULL)); |
| 60 | return rc; |
| 61 | } |
| 62 | |
| 63 | int ol_rx_pn_wapi_cmp(union htt_rx_pn_t *new_pn, |
| 64 | union htt_rx_pn_t *old_pn, int is_unicast, int opmode) |
| 65 | { |
| 66 | int pn_is_replay = 0; |
| 67 | |
| 68 | if (new_pn->pn128[1] == old_pn->pn128[1]) |
| 69 | pn_is_replay = (new_pn->pn128[0] <= old_pn->pn128[0]); |
| 70 | else |
| 71 | pn_is_replay = (new_pn->pn128[1] < old_pn->pn128[1]); |
| 72 | |
| 73 | if (is_unicast) { |
| 74 | if (opmode == wlan_op_mode_ap) |
| 75 | pn_is_replay |= ((new_pn->pn128[0] & 0x1ULL) != 0); |
| 76 | else |
| 77 | pn_is_replay |= ((new_pn->pn128[0] & 0x1ULL) != 1); |
| 78 | } |
| 79 | return pn_is_replay; |
| 80 | } |
| 81 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 82 | qdf_nbuf_t |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 83 | ol_rx_pn_check_base(struct ol_txrx_vdev_t *vdev, |
| 84 | struct ol_txrx_peer_t *peer, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 85 | unsigned tid, qdf_nbuf_t msdu_list) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 86 | { |
| 87 | struct ol_txrx_pdev_t *pdev = vdev->pdev; |
| 88 | union htt_rx_pn_t *last_pn; |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 89 | qdf_nbuf_t out_list_head = NULL; |
| 90 | qdf_nbuf_t out_list_tail = NULL; |
| 91 | qdf_nbuf_t mpdu; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 92 | int index; /* unicast vs. multicast */ |
| 93 | int pn_len; |
| 94 | void *rx_desc; |
| 95 | int last_pn_valid; |
| 96 | |
| 97 | /* Make sure host pn check is not redundant */ |
Anurag Chouhan | 8e0ccd3 | 2016-02-19 15:30:20 +0530 | [diff] [blame] | 98 | if ((qdf_atomic_read(&peer->fw_pn_check)) || |
DARAM SUDHA | a51d6fb | 2015-01-29 19:55:14 +0530 | [diff] [blame] | 99 | (vdev->opmode == wlan_op_mode_ibss)) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 100 | return msdu_list; |
DARAM SUDHA | a51d6fb | 2015-01-29 19:55:14 +0530 | [diff] [blame] | 101 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 102 | |
| 103 | /* First, check whether the PN check applies */ |
| 104 | rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu_list); |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 105 | qdf_assert(htt_rx_msdu_has_wlan_mcast_flag(pdev->htt_pdev, rx_desc)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 106 | index = htt_rx_msdu_is_wlan_mcast(pdev->htt_pdev, rx_desc) ? |
| 107 | txrx_sec_mcast : txrx_sec_ucast; |
| 108 | pn_len = pdev->rx_pn[peer->security[index].sec_type].len; |
| 109 | if (pn_len == 0) |
| 110 | return msdu_list; |
| 111 | |
| 112 | last_pn_valid = peer->tids_last_pn_valid[tid]; |
| 113 | last_pn = &peer->tids_last_pn[tid]; |
| 114 | mpdu = msdu_list; |
| 115 | while (mpdu) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 116 | qdf_nbuf_t mpdu_tail, next_mpdu; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 117 | union htt_rx_pn_t new_pn; |
| 118 | int pn_is_replay = 0; |
| 119 | |
| 120 | rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, mpdu); |
| 121 | |
| 122 | /* |
| 123 | * Find the last MSDU within this MPDU, and |
| 124 | * the find the first MSDU within the next MPDU. |
| 125 | */ |
| 126 | ol_rx_mpdu_list_next(pdev, mpdu, &mpdu_tail, &next_mpdu); |
| 127 | |
| 128 | /* Don't check the PN replay for non-encrypted frames */ |
| 129 | if (!htt_rx_mpdu_is_encrypted(pdev->htt_pdev, rx_desc)) { |
| 130 | ADD_MPDU_TO_LIST(out_list_head, out_list_tail, mpdu, |
| 131 | mpdu_tail); |
| 132 | mpdu = next_mpdu; |
| 133 | continue; |
| 134 | } |
| 135 | |
| 136 | /* retrieve PN from rx descriptor */ |
| 137 | htt_rx_mpdu_desc_pn(pdev->htt_pdev, rx_desc, &new_pn, pn_len); |
| 138 | |
| 139 | /* if there was no prior PN, there's nothing to check */ |
| 140 | if (last_pn_valid) { |
| 141 | pn_is_replay = |
| 142 | pdev->rx_pn[peer->security[index].sec_type]. |
| 143 | cmp(&new_pn, last_pn, index == txrx_sec_ucast, |
| 144 | vdev->opmode); |
| 145 | } else { |
| 146 | last_pn_valid = peer->tids_last_pn_valid[tid] = 1; |
| 147 | } |
| 148 | |
| 149 | if (pn_is_replay) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 150 | qdf_nbuf_t msdu; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 151 | static uint32_t last_pncheck_print_time /* = 0 */; |
| 152 | int log_level; |
| 153 | uint32_t current_time_ms; |
| 154 | |
| 155 | /* |
| 156 | * This MPDU failed the PN check: |
| 157 | * 1. notify the control SW of the PN failure |
| 158 | * (so countermeasures can be taken, if necessary) |
| 159 | * 2. Discard all the MSDUs from this MPDU. |
| 160 | */ |
| 161 | msdu = mpdu; |
| 162 | current_time_ms = |
Anurag Chouhan | 50220ce | 2016-02-18 20:11:33 +0530 | [diff] [blame] | 163 | qdf_system_ticks_to_msecs(qdf_system_ticks()); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 164 | if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS < |
| 165 | (current_time_ms - last_pncheck_print_time)) { |
| 166 | last_pncheck_print_time = current_time_ms; |
| 167 | log_level = TXRX_PRINT_LEVEL_WARN; |
| 168 | } else { |
| 169 | log_level = TXRX_PRINT_LEVEL_INFO2; |
| 170 | } |
| 171 | |
| 172 | TXRX_PRINT(log_level, |
| 173 | "PN check failed - TID %d, peer %p " |
| 174 | "(%02x:%02x:%02x:%02x:%02x:%02x) %s\n" |
| 175 | " old PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n" |
| 176 | " new PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n" |
| 177 | " new seq num = %d\n", |
| 178 | tid, peer, |
| 179 | peer->mac_addr.raw[0], peer->mac_addr.raw[1], |
| 180 | peer->mac_addr.raw[2], peer->mac_addr.raw[3], |
| 181 | peer->mac_addr.raw[4], peer->mac_addr.raw[5], |
| 182 | (index == |
| 183 | txrx_sec_ucast) ? "ucast" : "mcast", |
| 184 | last_pn->pn128[1], last_pn->pn128[0], |
| 185 | last_pn->pn128[0] & 0xffffffffffffULL, |
| 186 | new_pn.pn128[1], new_pn.pn128[0], |
| 187 | new_pn.pn128[0] & 0xffffffffffffULL, |
| 188 | htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, |
| 189 | rx_desc)); |
| 190 | #if defined(ENABLE_RX_PN_TRACE) |
| 191 | ol_rx_pn_trace_display(pdev, 1); |
| 192 | #endif /* ENABLE_RX_PN_TRACE */ |
| 193 | ol_rx_err(pdev->ctrl_pdev, |
| 194 | vdev->vdev_id, peer->mac_addr.raw, tid, |
| 195 | htt_rx_mpdu_desc_tsf32(pdev->htt_pdev, |
| 196 | rx_desc), OL_RX_ERR_PN, |
| 197 | mpdu, NULL, 0); |
| 198 | /* free all MSDUs within this MPDU */ |
| 199 | do { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 200 | qdf_nbuf_t next_msdu; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 201 | OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, |
| 202 | rx_desc, OL_RX_ERR_PN); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 203 | next_msdu = qdf_nbuf_next(msdu); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 204 | htt_rx_desc_frame_free(pdev->htt_pdev, msdu); |
| 205 | if (msdu == mpdu_tail) |
| 206 | break; |
| 207 | else |
| 208 | msdu = next_msdu; |
| 209 | } while (1); |
| 210 | } else { |
| 211 | ADD_MPDU_TO_LIST(out_list_head, out_list_tail, mpdu, |
| 212 | mpdu_tail); |
| 213 | /* |
| 214 | * Remember the new PN. |
| 215 | * For simplicity, just do 2 64-bit word copies to |
| 216 | * cover the worst case (WAPI), regardless of the length |
| 217 | * of the PN. |
| 218 | * This is more efficient than doing a conditional |
| 219 | * branch to copy only the relevant portion. |
| 220 | */ |
| 221 | last_pn->pn128[0] = new_pn.pn128[0]; |
| 222 | last_pn->pn128[1] = new_pn.pn128[1]; |
| 223 | OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc); |
| 224 | } |
| 225 | |
| 226 | mpdu = next_mpdu; |
| 227 | } |
| 228 | /* make sure the list is null-terminated */ |
| 229 | if (out_list_tail) |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 230 | qdf_nbuf_set_next(out_list_tail, NULL); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 231 | |
| 232 | return out_list_head; |
| 233 | } |
| 234 | |
| 235 | void |
| 236 | ol_rx_pn_check(struct ol_txrx_vdev_t *vdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 237 | struct ol_txrx_peer_t *peer, unsigned tid, qdf_nbuf_t msdu_list) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 238 | { |
| 239 | msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list); |
| 240 | ol_rx_fwd_check(vdev, peer, tid, msdu_list); |
| 241 | } |
| 242 | |
| 243 | void |
| 244 | ol_rx_pn_check_only(struct ol_txrx_vdev_t *vdev, |
| 245 | struct ol_txrx_peer_t *peer, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 246 | unsigned tid, qdf_nbuf_t msdu_list) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 247 | { |
| 248 | msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list); |
| 249 | ol_rx_deliver(vdev, peer, tid, msdu_list); |
| 250 | } |
| 251 | |
| 252 | #if defined(ENABLE_RX_PN_TRACE) |
| 253 | |
| 254 | A_STATUS ol_rx_pn_trace_attach(ol_txrx_pdev_handle pdev) |
| 255 | { |
| 256 | int num_elems; |
| 257 | |
| 258 | num_elems = 1 << TXRX_RX_PN_TRACE_SIZE_LOG2; |
| 259 | pdev->rx_pn_trace.idx = 0; |
| 260 | pdev->rx_pn_trace.cnt = 0; |
| 261 | pdev->rx_pn_trace.mask = num_elems - 1; |
| 262 | pdev->rx_pn_trace.data = |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 263 | qdf_mem_malloc(sizeof(*pdev->rx_pn_trace.data) * num_elems); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 264 | if (!pdev->rx_pn_trace.data) |
| 265 | return A_NO_MEMORY; |
| 266 | return A_OK; |
| 267 | } |
| 268 | |
| 269 | void ol_rx_pn_trace_detach(ol_txrx_pdev_handle pdev) |
| 270 | { |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 271 | qdf_mem_free(pdev->rx_pn_trace.data); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 272 | } |
| 273 | |
| 274 | void |
| 275 | ol_rx_pn_trace_add(struct ol_txrx_pdev_t *pdev, |
| 276 | struct ol_txrx_peer_t *peer, uint16_t tid, void *rx_desc) |
| 277 | { |
| 278 | uint32_t idx = pdev->rx_pn_trace.idx; |
| 279 | union htt_rx_pn_t pn; |
| 280 | uint32_t pn32; |
| 281 | uint16_t seq_num; |
| 282 | uint8_t unicast; |
| 283 | |
| 284 | htt_rx_mpdu_desc_pn(pdev->htt_pdev, rx_desc, &pn, 48); |
| 285 | pn32 = pn.pn48 & 0xffffffff; |
| 286 | seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_desc); |
| 287 | unicast = !htt_rx_msdu_is_wlan_mcast(pdev->htt_pdev, rx_desc); |
| 288 | |
| 289 | pdev->rx_pn_trace.data[idx].peer = peer; |
| 290 | pdev->rx_pn_trace.data[idx].tid = tid; |
| 291 | pdev->rx_pn_trace.data[idx].seq_num = seq_num; |
| 292 | pdev->rx_pn_trace.data[idx].unicast = unicast; |
| 293 | pdev->rx_pn_trace.data[idx].pn32 = pn32; |
| 294 | pdev->rx_pn_trace.cnt++; |
| 295 | idx++; |
| 296 | pdev->rx_pn_trace.idx = idx & pdev->rx_pn_trace.mask; |
| 297 | } |
| 298 | |
/**
 * ol_rx_pn_trace_display() - dump the PN trace ring buffer to the log
 * @pdev: physical device owning the trace log
 * @just_once: if nonzero, print only on the first call ever (subsequent
 *             calls return immediately)
 *
 * Prints the recorded PN trace entries in chronological order.  If the
 * ring has wrapped, the dump starts at the oldest surviving entry.
 */
void ol_rx_pn_trace_display(ol_txrx_pdev_handle pdev, int just_once)
{
	/* shared across calls to implement the just_once behavior */
	static int print_count /* = 0 */;
	uint32_t i, start, end;
	uint64_t cnt;
	int elems;
	int limit = 0;		/* move this to the arg list? */

	if (print_count != 0 && just_once)
		return;

	print_count++;

	end = pdev->rx_pn_trace.idx;
	if (pdev->rx_pn_trace.cnt <= pdev->rx_pn_trace.mask) {
		/* trace log has not yet wrapped around - start at the top */
		start = 0;
		cnt = 0;
	} else {
		/*
		 * Ring has wrapped: the oldest entry is the one the next
		 * write would overwrite, i.e. the current index.
		 */
		start = end;
		cnt = pdev->rx_pn_trace.cnt - (pdev->rx_pn_trace.mask + 1);
	}
	/* number of entries that will be printed (mod ring size) */
	elems = (end - 1 - start) & pdev->rx_pn_trace.mask;
	if (limit > 0 && elems > limit) {
		/* print only the most recent "limit" entries */
		int delta;
		delta = elems - limit;
		start += delta;
		start &= pdev->rx_pn_trace.mask;
		cnt += delta;
	}

	i = start;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "                                 seq     PN");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "   count  idx    peer   tid uni  num    LSBs");
	do {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "  %6lld %4d  %p %2d   %d %4d %8d",
			  cnt, i,
			  pdev->rx_pn_trace.data[i].peer,
			  pdev->rx_pn_trace.data[i].tid,
			  pdev->rx_pn_trace.data[i].unicast,
			  pdev->rx_pn_trace.data[i].seq_num,
			  pdev->rx_pn_trace.data[i].pn32);
		cnt++;
		i++;
		i &= pdev->rx_pn_trace.mask;
	} while (i != end);
}
| 349 | #endif /* ENABLE_RX_PN_TRACE */ |