Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2011, 2013-2015 The Linux Foundation. All rights reserved. |
| 3 | * |
| 4 | * Previously licensed under the ISC license by Qualcomm Atheros, Inc. |
| 5 | * |
| 6 | * |
| 7 | * Permission to use, copy, modify, and/or distribute this software for |
| 8 | * any purpose with or without fee is hereby granted, provided that the |
| 9 | * above copyright notice and this permission notice appear in all |
| 10 | * copies. |
| 11 | * |
| 12 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 13 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 15 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 16 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 17 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 18 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 19 | * PERFORMANCE OF THIS SOFTWARE. |
| 20 | */ |
| 21 | |
| 22 | /* |
| 23 | * This file was originally distributed by Qualcomm Atheros, Inc. |
| 24 | * under proprietary terms before Copyright ownership was assigned |
| 25 | * to the Linux Foundation. |
| 26 | */ |
| 27 | |
| 28 | #include <cdf_nbuf.h> /* cdf_nbuf_t */ |
| 29 | |
| 30 | #include <ol_htt_rx_api.h> /* htt_rx_pn_t, etc. */ |
| 31 | #include <ol_ctrl_txrx_api.h> /* ol_rx_err */ |
| 32 | |
| 33 | #include <ol_txrx_internal.h> /* ol_rx_mpdu_list_next */ |
| 34 | #include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc. */ |
| 35 | #include <ol_rx_pn.h> /* our own defs */ |
| 36 | #include <ol_rx_fwd.h> /* ol_rx_fwd_check */ |
| 37 | #include <ol_rx.h> /* ol_rx_deliver */ |
| 38 | |
/*
 * Append an MPDU (a chain of MSDU netbufs, mpdu .. mpdu_tail) to the
 * list of good frames delimited by head/tail.
 * head and tail must be simple cdf_nbuf_t lvalues; head is NULL when the
 * list is empty.
 * NOTE: arguments are evaluated more than once - do not pass expressions
 * with side effects.
 */
#define ADD_MPDU_TO_LIST(head, tail, mpdu, mpdu_tail) do {		\
		if (!head) {						\
			head = mpdu;					\
		} else {						\
			cdf_nbuf_set_next(tail, mpdu);			\
		}							\
		tail = mpdu_tail;					\
	} while (0)
| 48 | |
| 49 | int ol_rx_pn_cmp24(union htt_rx_pn_t *new_pn, |
| 50 | union htt_rx_pn_t *old_pn, int is_unicast, int opmode) |
| 51 | { |
| 52 | int rc = ((new_pn->pn24 & 0xffffff) <= (old_pn->pn24 & 0xffffff)); |
| 53 | return rc; |
| 54 | } |
| 55 | |
| 56 | int ol_rx_pn_cmp48(union htt_rx_pn_t *new_pn, |
| 57 | union htt_rx_pn_t *old_pn, int is_unicast, int opmode) |
| 58 | { |
| 59 | int rc = ((new_pn->pn48 & 0xffffffffffffULL) <= |
| 60 | (old_pn->pn48 & 0xffffffffffffULL)); |
| 61 | return rc; |
| 62 | } |
| 63 | |
| 64 | int ol_rx_pn_wapi_cmp(union htt_rx_pn_t *new_pn, |
| 65 | union htt_rx_pn_t *old_pn, int is_unicast, int opmode) |
| 66 | { |
| 67 | int pn_is_replay = 0; |
| 68 | |
| 69 | if (new_pn->pn128[1] == old_pn->pn128[1]) |
| 70 | pn_is_replay = (new_pn->pn128[0] <= old_pn->pn128[0]); |
| 71 | else |
| 72 | pn_is_replay = (new_pn->pn128[1] < old_pn->pn128[1]); |
| 73 | |
| 74 | if (is_unicast) { |
| 75 | if (opmode == wlan_op_mode_ap) |
| 76 | pn_is_replay |= ((new_pn->pn128[0] & 0x1ULL) != 0); |
| 77 | else |
| 78 | pn_is_replay |= ((new_pn->pn128[0] & 0x1ULL) != 1); |
| 79 | } |
| 80 | return pn_is_replay; |
| 81 | } |
| 82 | |
/**
 * ol_rx_pn_check_base() - filter an rx MPDU list through the PN replay check
 * @vdev: virtual device on which the frames arrived
 * @peer: transmitting peer whose per-TID last-PN state is consulted/updated
 * @tid: traffic ID, indexes the peer's tids_last_pn[] records
 * @msdu_list: chain of MSDU netbufs, grouped into MPDUs
 *
 * Walks the list MPDU by MPDU, freeing every encrypted MPDU whose PN fails
 * the security-type-specific comparison against the last accepted PN, and
 * returns the NULL-terminated list of surviving frames.  The check is
 * skipped entirely (list returned unmodified) when the firmware already
 * performs it, for IBSS vdevs, or when the security type has no PN.
 * Unencrypted MPDUs pass through without a check.
 */
cdf_nbuf_t
ol_rx_pn_check_base(struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned tid, cdf_nbuf_t msdu_list)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	union htt_rx_pn_t *last_pn;
	cdf_nbuf_t out_list_head = NULL;
	cdf_nbuf_t out_list_tail = NULL;
	cdf_nbuf_t mpdu;
	int index;              /* unicast vs. multicast */
	int pn_len;
	void *rx_desc;
	int last_pn_valid;

	/* Make sure host pn check is not redundant */
	if ((cdf_atomic_read(&peer->fw_pn_check)) ||
	    (vdev->opmode == wlan_op_mode_ibss)) {
		return msdu_list;
	}

	/* First, check whether the PN check applies */
	rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu_list);
	cdf_assert(htt_rx_msdu_has_wlan_mcast_flag(pdev->htt_pdev, rx_desc));
	index = htt_rx_msdu_is_wlan_mcast(pdev->htt_pdev, rx_desc) ?
		txrx_sec_mcast : txrx_sec_ucast;
	pn_len = pdev->rx_pn[peer->security[index].sec_type].len;
	/* pn_len == 0 means this security type carries no PN to check */
	if (pn_len == 0)
		return msdu_list;

	last_pn_valid = peer->tids_last_pn_valid[tid];
	last_pn = &peer->tids_last_pn[tid];
	mpdu = msdu_list;
	while (mpdu) {
		cdf_nbuf_t mpdu_tail, next_mpdu;
		union htt_rx_pn_t new_pn;
		int pn_is_replay = 0;

		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, mpdu);

		/*
		 * Find the last MSDU within this MPDU, and
		 * find the first MSDU within the next MPDU.
		 */
		ol_rx_mpdu_list_next(pdev, mpdu, &mpdu_tail, &next_mpdu);

		/* Don't check the PN replay for non-encrypted frames */
		if (!htt_rx_mpdu_is_encrypted(pdev->htt_pdev, rx_desc)) {
			ADD_MPDU_TO_LIST(out_list_head, out_list_tail, mpdu,
					 mpdu_tail);
			mpdu = next_mpdu;
			continue;
		}

		/* retrieve PN from rx descriptor */
		htt_rx_mpdu_desc_pn(pdev->htt_pdev, rx_desc, &new_pn, pn_len);

		/* if there was no prior PN, there's nothing to check */
		if (last_pn_valid) {
			/* cmp() is the per-security-type comparator
			 * (e.g. ol_rx_pn_cmp24/cmp48/wapi_cmp) */
			pn_is_replay =
				pdev->rx_pn[peer->security[index].sec_type].
				cmp(&new_pn, last_pn, index == txrx_sec_ucast,
				    vdev->opmode);
		} else {
			/* first PN seen for this TID: record validity and
			 * accept the frame */
			last_pn_valid = peer->tids_last_pn_valid[tid] = 1;
		}

		if (pn_is_replay) {
			cdf_nbuf_t msdu;
			/* rate-limit the WARN-level print; later failures
			 * within the window log at INFO2 */
			static uint32_t last_pncheck_print_time /* = 0 */;
			int log_level;
			uint32_t current_time_ms;

			/*
			 * This MPDU failed the PN check:
			 * 1. notify the control SW of the PN failure
			 *    (so countermeasures can be taken, if necessary)
			 * 2. Discard all the MSDUs from this MPDU.
			 */
			msdu = mpdu;
			current_time_ms =
				cdf_system_ticks_to_msecs(cdf_system_ticks());
			if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
			    (current_time_ms - last_pncheck_print_time)) {
				last_pncheck_print_time = current_time_ms;
				log_level = TXRX_PRINT_LEVEL_WARN;
			} else {
				log_level = TXRX_PRINT_LEVEL_INFO2;
			}

			TXRX_PRINT(log_level,
				   "PN check failed - TID %d, peer %p "
				   "(%02x:%02x:%02x:%02x:%02x:%02x) %s\n"
				   "    old PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				   "    new PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				   "    new seq num = %d\n",
				   tid, peer,
				   peer->mac_addr.raw[0], peer->mac_addr.raw[1],
				   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
				   peer->mac_addr.raw[4], peer->mac_addr.raw[5],
				   (index ==
				    txrx_sec_ucast) ? "ucast" : "mcast",
				   last_pn->pn128[1], last_pn->pn128[0],
				   last_pn->pn128[0] & 0xffffffffffffULL,
				   new_pn.pn128[1], new_pn.pn128[0],
				   new_pn.pn128[0] & 0xffffffffffffULL,
				   htt_rx_mpdu_desc_seq_num(pdev->htt_pdev,
							    rx_desc));
#if defined(ENABLE_RX_PN_TRACE)
			ol_rx_pn_trace_display(pdev, 1);
#endif /* ENABLE_RX_PN_TRACE */
			ol_rx_err(pdev->ctrl_pdev,
				  vdev->vdev_id, peer->mac_addr.raw, tid,
				  htt_rx_mpdu_desc_tsf32(pdev->htt_pdev,
							 rx_desc), OL_RX_ERR_PN,
				  mpdu, NULL, 0);
			/* free all MSDUs within this MPDU */
			do {
				cdf_nbuf_t next_msdu;
				OL_RX_ERR_STATISTICS_1(pdev, vdev, peer,
						       rx_desc, OL_RX_ERR_PN);
				/* save the link before freeing the buffer */
				next_msdu = cdf_nbuf_next(msdu);
				htt_rx_desc_frame_free(pdev->htt_pdev, msdu);
				if (msdu == mpdu_tail)
					break;
				else
					msdu = next_msdu;
			} while (1);
		} else {
			ADD_MPDU_TO_LIST(out_list_head, out_list_tail, mpdu,
					 mpdu_tail);
			/*
			 * Remember the new PN.
			 * For simplicity, just do 2 64-bit word copies to
			 * cover the worst case (WAPI), regardless of the length
			 * of the PN.
			 * This is more efficient than doing a conditional
			 * branch to copy only the relevant portion.
			 */
			last_pn->pn128[0] = new_pn.pn128[0];
			last_pn->pn128[1] = new_pn.pn128[1];
			OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc);
		}

		mpdu = next_mpdu;
	}
	/* make sure the list is null-terminated */
	if (out_list_tail)
		cdf_nbuf_set_next(out_list_tail, NULL);

	return out_list_head;
}
| 235 | |
| 236 | void |
| 237 | ol_rx_pn_check(struct ol_txrx_vdev_t *vdev, |
| 238 | struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list) |
| 239 | { |
| 240 | msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list); |
| 241 | ol_rx_fwd_check(vdev, peer, tid, msdu_list); |
| 242 | } |
| 243 | |
| 244 | void |
| 245 | ol_rx_pn_check_only(struct ol_txrx_vdev_t *vdev, |
| 246 | struct ol_txrx_peer_t *peer, |
| 247 | unsigned tid, cdf_nbuf_t msdu_list) |
| 248 | { |
| 249 | msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list); |
| 250 | ol_rx_deliver(vdev, peer, tid, msdu_list); |
| 251 | } |
| 252 | |
| 253 | #if defined(ENABLE_RX_PN_TRACE) |
| 254 | |
| 255 | A_STATUS ol_rx_pn_trace_attach(ol_txrx_pdev_handle pdev) |
| 256 | { |
| 257 | int num_elems; |
| 258 | |
| 259 | num_elems = 1 << TXRX_RX_PN_TRACE_SIZE_LOG2; |
| 260 | pdev->rx_pn_trace.idx = 0; |
| 261 | pdev->rx_pn_trace.cnt = 0; |
| 262 | pdev->rx_pn_trace.mask = num_elems - 1; |
| 263 | pdev->rx_pn_trace.data = |
| 264 | cdf_mem_malloc(sizeof(*pdev->rx_pn_trace.data) * num_elems); |
| 265 | if (!pdev->rx_pn_trace.data) |
| 266 | return A_NO_MEMORY; |
| 267 | return A_OK; |
| 268 | } |
| 269 | |
| 270 | void ol_rx_pn_trace_detach(ol_txrx_pdev_handle pdev) |
| 271 | { |
| 272 | cdf_mem_free(pdev->rx_pn_trace.data); |
| 273 | } |
| 274 | |
| 275 | void |
| 276 | ol_rx_pn_trace_add(struct ol_txrx_pdev_t *pdev, |
| 277 | struct ol_txrx_peer_t *peer, uint16_t tid, void *rx_desc) |
| 278 | { |
| 279 | uint32_t idx = pdev->rx_pn_trace.idx; |
| 280 | union htt_rx_pn_t pn; |
| 281 | uint32_t pn32; |
| 282 | uint16_t seq_num; |
| 283 | uint8_t unicast; |
| 284 | |
| 285 | htt_rx_mpdu_desc_pn(pdev->htt_pdev, rx_desc, &pn, 48); |
| 286 | pn32 = pn.pn48 & 0xffffffff; |
| 287 | seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_desc); |
| 288 | unicast = !htt_rx_msdu_is_wlan_mcast(pdev->htt_pdev, rx_desc); |
| 289 | |
| 290 | pdev->rx_pn_trace.data[idx].peer = peer; |
| 291 | pdev->rx_pn_trace.data[idx].tid = tid; |
| 292 | pdev->rx_pn_trace.data[idx].seq_num = seq_num; |
| 293 | pdev->rx_pn_trace.data[idx].unicast = unicast; |
| 294 | pdev->rx_pn_trace.data[idx].pn32 = pn32; |
| 295 | pdev->rx_pn_trace.cnt++; |
| 296 | idx++; |
| 297 | pdev->rx_pn_trace.idx = idx & pdev->rx_pn_trace.mask; |
| 298 | } |
| 299 | |
| 300 | void ol_rx_pn_trace_display(ol_txrx_pdev_handle pdev, int just_once) |
| 301 | { |
| 302 | static int print_count /* = 0 */; |
| 303 | uint32_t i, start, end; |
| 304 | uint64_t cnt; |
| 305 | int elems; |
| 306 | int limit = 0; /* move this to the arg list? */ |
| 307 | |
| 308 | if (print_count != 0 && just_once) |
| 309 | return; |
| 310 | |
| 311 | print_count++; |
| 312 | |
| 313 | end = pdev->rx_pn_trace.idx; |
| 314 | if (pdev->rx_pn_trace.cnt <= pdev->rx_pn_trace.mask) { |
| 315 | /* trace log has not yet wrapped around - start at the top */ |
| 316 | start = 0; |
| 317 | cnt = 0; |
| 318 | } else { |
| 319 | start = end; |
| 320 | cnt = pdev->rx_pn_trace.cnt - (pdev->rx_pn_trace.mask + 1); |
| 321 | } |
| 322 | elems = (end - 1 - start) & pdev->rx_pn_trace.mask; |
| 323 | if (limit > 0 && elems > limit) { |
| 324 | int delta; |
| 325 | delta = elems - limit; |
| 326 | start += delta; |
| 327 | start &= pdev->rx_pn_trace.mask; |
| 328 | cnt += delta; |
| 329 | } |
| 330 | |
| 331 | i = start; |
| 332 | CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO, |
| 333 | " seq PN"); |
| 334 | CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO, |
| 335 | " count idx peer tid uni num LSBs"); |
| 336 | do { |
| 337 | CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO, |
| 338 | " %6lld %4d %p %2d %d %4d %8d", |
| 339 | cnt, i, |
| 340 | pdev->rx_pn_trace.data[i].peer, |
| 341 | pdev->rx_pn_trace.data[i].tid, |
| 342 | pdev->rx_pn_trace.data[i].unicast, |
| 343 | pdev->rx_pn_trace.data[i].seq_num, |
| 344 | pdev->rx_pn_trace.data[i].pn32); |
| 345 | cnt++; |
| 346 | i++; |
| 347 | i &= pdev->rx_pn_trace.mask; |
| 348 | } while (i != end); |
| 349 | } |
| 350 | #endif /* ENABLE_RX_PN_TRACE */ |