Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1 | /* |
Rakesh Pillai | ae0f601 | 2020-01-02 11:03:09 +0530 | [diff] [blame] | 2 | * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 3 | * |
| 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all |
| 7 | * copies. |
| 8 | * |
| 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 10 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 11 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 12 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 13 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 14 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 15 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
Harilakshmi Deshkumar | 1ea2109 | 2017-05-08 21:16:27 +0530 | [diff] [blame] | 16 | * PERFORMANCE OF THIS SOFTWARE. |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 17 | */ |
| 18 | |
| 19 | #include <qdf_types.h> |
| 20 | #include <qdf_lock.h> |
Balamurugan Mahalingam | f72cb1f | 2018-06-25 12:18:34 +0530 | [diff] [blame] | 21 | #include <hal_hw_headers.h> |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 22 | #include "dp_htt.h" |
| 23 | #include "dp_types.h" |
| 24 | #include "dp_internal.h" |
Jeff Johnson | 2cb8fc7 | 2016-12-17 10:45:08 -0800 | [diff] [blame] | 25 | #include "dp_peer.h" |
Lin Bai | f1c577e | 2018-05-22 20:45:42 +0800 | [diff] [blame] | 26 | #include "dp_rx_defrag.h" |
Sravan Kumar Kairam | ebd627e | 2018-08-28 23:32:52 +0530 | [diff] [blame] | 27 | #include "dp_rx.h" |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 28 | #include <hal_api.h> |
Karunakar Dasineni | a0f09ea | 2016-11-21 17:41:31 -0800 | [diff] [blame] | 29 | #include <hal_reo.h> |
Venkata Sharath Chandra Manchala | f2a125a | 2016-11-28 18:10:11 -0800 | [diff] [blame] | 30 | #include <cdp_txrx_handle.h> |
Ravi Joshi | af9ace8 | 2017-02-17 12:41:48 -0800 | [diff] [blame] | 31 | #include <wlan_cfg.h> |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 32 | |
nobelj | debe2b3 | 2019-04-23 11:18:47 -0700 | [diff] [blame] | 33 | #ifdef WLAN_TX_PKT_CAPTURE_ENH |
| 34 | #include "dp_tx_capture.h" |
| 35 | #endif |
| 36 | |
Pramod Simha | b17d067 | 2017-03-06 17:20:13 -0800 | [diff] [blame] | 37 | static inline void |
| 38 | dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params, |
| 39 | uint8_t valid) |
| 40 | { |
| 41 | params->u.upd_queue_params.update_svld = 1; |
| 42 | params->u.upd_queue_params.svld = valid; |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 43 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
| 44 | "%s: Setting SSN valid bit to %d", |
| 45 | __func__, valid); |
Pramod Simha | b17d067 | 2017-03-06 17:20:13 -0800 | [diff] [blame] | 46 | } |
Pramod Simha | b17d067 | 2017-03-06 17:20:13 -0800 | [diff] [blame] | 47 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 48 | static inline int dp_peer_find_mac_addr_cmp( |
| 49 | union dp_align_mac_addr *mac_addr1, |
| 50 | union dp_align_mac_addr *mac_addr2) |
| 51 | { |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 52 | /* |
| 53 | * Intentionally use & rather than &&. |
| 54 | * because the operands are binary rather than generic boolean, |
| 55 | * the functionality is equivalent. |
| 56 | * Using && has the advantage of short-circuited evaluation, |
| 57 | * but using & has the advantage of no conditional branching, |
| 58 | * which is a more significant benefit. |
| 59 | */ |
Amir Patel | cb99026 | 2019-05-28 15:12:48 +0530 | [diff] [blame] | 60 | return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd) |
| 61 | & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef)); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 62 | } |
| 63 | |
Tallapragada Kalyan | c741308 | 2019-03-07 21:22:10 +0530 | [diff] [blame] | 64 | static int dp_peer_ast_table_attach(struct dp_soc *soc) |
| 65 | { |
| 66 | uint32_t max_ast_index; |
| 67 | |
| 68 | max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx); |
| 69 | /* allocate ast_table for ast entry to ast_index map */ |
| 70 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, |
| 71 | "\n<=== cfg max ast idx %d ====>", max_ast_index); |
| 72 | soc->ast_table = qdf_mem_malloc(max_ast_index * |
| 73 | sizeof(struct dp_ast_entry *)); |
| 74 | if (!soc->ast_table) { |
| 75 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 76 | "%s: ast_table memory allocation failed", __func__); |
| 77 | return QDF_STATUS_E_NOMEM; |
| 78 | } |
| 79 | return 0; /* success */ |
| 80 | } |
| 81 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 82 | static int dp_peer_find_map_attach(struct dp_soc *soc) |
| 83 | { |
| 84 | uint32_t max_peers, peer_map_size; |
| 85 | |
Chaithanya Garrepalli | 2f57279 | 2018-04-11 17:49:28 +0530 | [diff] [blame] | 86 | max_peers = soc->max_peers; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 87 | /* allocate the peer ID -> peer object map */ |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 88 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, |
| 89 | "\n<=== cfg max peer id %d ====>", max_peers); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 90 | peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]); |
| 91 | soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size); |
| 92 | if (!soc->peer_id_to_obj_map) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 93 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 94 | "%s: peer map memory allocation failed", __func__); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 95 | return QDF_STATUS_E_NOMEM; |
| 96 | } |
| 97 | |
| 98 | /* |
| 99 | * The peer_id_to_obj_map doesn't really need to be initialized, |
| 100 | * since elements are only used after they have been individually |
| 101 | * initialized. |
| 102 | * However, it is convenient for debugging to have all elements |
| 103 | * that are not in use set to 0. |
| 104 | */ |
| 105 | qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 106 | return 0; /* success */ |
| 107 | } |
| 108 | |
Amir Patel | cb99026 | 2019-05-28 15:12:48 +0530 | [diff] [blame] | 109 | static int dp_log2_ceil(unsigned int value) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 110 | { |
Amir Patel | cb99026 | 2019-05-28 15:12:48 +0530 | [diff] [blame] | 111 | unsigned int tmp = value; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 112 | int log2 = -1; |
| 113 | |
| 114 | while (tmp) { |
| 115 | log2++; |
| 116 | tmp >>= 1; |
| 117 | } |
| 118 | if (1 << log2 != value) |
| 119 | log2++; |
| 120 | return log2; |
| 121 | } |
| 122 | |
| 123 | static int dp_peer_find_add_id_to_obj( |
| 124 | struct dp_peer *peer, |
| 125 | uint16_t peer_id) |
| 126 | { |
| 127 | int i; |
| 128 | |
| 129 | for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) { |
| 130 | if (peer->peer_ids[i] == HTT_INVALID_PEER) { |
| 131 | peer->peer_ids[i] = peer_id; |
| 132 | return 0; /* success */ |
| 133 | } |
| 134 | } |
| 135 | return QDF_STATUS_E_FAILURE; /* failure */ |
| 136 | } |
| 137 | |
| 138 | #define DP_PEER_HASH_LOAD_MULT 2 |
| 139 | #define DP_PEER_HASH_LOAD_SHIFT 0 |
| 140 | |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 141 | #define DP_AST_HASH_LOAD_MULT 2 |
| 142 | #define DP_AST_HASH_LOAD_SHIFT 0 |
| 143 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 144 | static int dp_peer_find_hash_attach(struct dp_soc *soc) |
| 145 | { |
| 146 | int i, hash_elems, log2; |
| 147 | |
| 148 | /* allocate the peer MAC address -> peer object hash table */ |
Chaithanya Garrepalli | 2f57279 | 2018-04-11 17:49:28 +0530 | [diff] [blame] | 149 | hash_elems = soc->max_peers; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 150 | hash_elems *= DP_PEER_HASH_LOAD_MULT; |
| 151 | hash_elems >>= DP_PEER_HASH_LOAD_SHIFT; |
| 152 | log2 = dp_log2_ceil(hash_elems); |
| 153 | hash_elems = 1 << log2; |
| 154 | |
| 155 | soc->peer_hash.mask = hash_elems - 1; |
| 156 | soc->peer_hash.idx_bits = log2; |
| 157 | /* allocate an array of TAILQ peer object lists */ |
| 158 | soc->peer_hash.bins = qdf_mem_malloc( |
| 159 | hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer))); |
| 160 | if (!soc->peer_hash.bins) |
| 161 | return QDF_STATUS_E_NOMEM; |
| 162 | |
| 163 | for (i = 0; i < hash_elems; i++) |
| 164 | TAILQ_INIT(&soc->peer_hash.bins[i]); |
| 165 | |
| 166 | return 0; |
| 167 | } |
| 168 | |
| 169 | static void dp_peer_find_hash_detach(struct dp_soc *soc) |
| 170 | { |
phadiman | b100750 | 2019-04-03 15:21:53 +0530 | [diff] [blame] | 171 | if (soc->peer_hash.bins) { |
| 172 | qdf_mem_free(soc->peer_hash.bins); |
| 173 | soc->peer_hash.bins = NULL; |
| 174 | } |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 175 | } |
| 176 | |
| 177 | static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc, |
| 178 | union dp_align_mac_addr *mac_addr) |
| 179 | { |
| 180 | unsigned index; |
| 181 | |
| 182 | index = |
| 183 | mac_addr->align2.bytes_ab ^ |
| 184 | mac_addr->align2.bytes_cd ^ |
| 185 | mac_addr->align2.bytes_ef; |
| 186 | index ^= index >> soc->peer_hash.idx_bits; |
| 187 | index &= soc->peer_hash.mask; |
| 188 | return index; |
| 189 | } |
| 190 | |
| 191 | |
| 192 | void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer) |
| 193 | { |
| 194 | unsigned index; |
| 195 | |
| 196 | index = dp_peer_find_hash_index(soc, &peer->mac_addr); |
| 197 | qdf_spin_lock_bh(&soc->peer_ref_mutex); |
| 198 | /* |
| 199 | * It is important to add the new peer at the tail of the peer list |
| 200 | * with the bin index. Together with having the hash_find function |
| 201 | * search from head to tail, this ensures that if two entries with |
| 202 | * the same MAC address are stored, the one added first will be |
| 203 | * found first. |
| 204 | */ |
| 205 | TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem); |
| 206 | qdf_spin_unlock_bh(&soc->peer_ref_mutex); |
| 207 | } |
| 208 | |
Tallapragada Kalyan | 71c46b9 | 2018-03-01 13:17:10 +0530 | [diff] [blame] | 209 | #ifdef FEATURE_AST |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 210 | /* |
| 211 | * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table |
| 212 | * @soc: SoC handle |
| 213 | * |
| 214 | * Return: None |
| 215 | */ |
| 216 | static int dp_peer_ast_hash_attach(struct dp_soc *soc) |
| 217 | { |
| 218 | int i, hash_elems, log2; |
Tallapragada Kalyan | c741308 | 2019-03-07 21:22:10 +0530 | [diff] [blame] | 219 | unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx); |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 220 | |
Tallapragada Kalyan | c741308 | 2019-03-07 21:22:10 +0530 | [diff] [blame] | 221 | hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >> |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 222 | DP_AST_HASH_LOAD_SHIFT); |
| 223 | |
| 224 | log2 = dp_log2_ceil(hash_elems); |
| 225 | hash_elems = 1 << log2; |
| 226 | |
| 227 | soc->ast_hash.mask = hash_elems - 1; |
| 228 | soc->ast_hash.idx_bits = log2; |
| 229 | |
Tallapragada Kalyan | c741308 | 2019-03-07 21:22:10 +0530 | [diff] [blame] | 230 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, |
| 231 | "ast hash_elems: %d, max_ast_idx: %d", |
| 232 | hash_elems, max_ast_idx); |
| 233 | |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 234 | /* allocate an array of TAILQ peer object lists */ |
| 235 | soc->ast_hash.bins = qdf_mem_malloc( |
| 236 | hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, |
| 237 | dp_ast_entry))); |
| 238 | |
| 239 | if (!soc->ast_hash.bins) |
| 240 | return QDF_STATUS_E_NOMEM; |
| 241 | |
| 242 | for (i = 0; i < hash_elems; i++) |
| 243 | TAILQ_INIT(&soc->ast_hash.bins[i]); |
| 244 | |
| 245 | return 0; |
| 246 | } |
| 247 | |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 248 | /* |
| 249 | * dp_peer_ast_cleanup() - cleanup the references |
| 250 | * @soc: SoC handle |
| 251 | * @ast: ast entry |
| 252 | * |
| 253 | * Return: None |
| 254 | */ |
Kiran Venkatappa | ed35f44 | 2018-07-19 22:22:29 +0530 | [diff] [blame] | 255 | static inline void dp_peer_ast_cleanup(struct dp_soc *soc, |
| 256 | struct dp_ast_entry *ast) |
| 257 | { |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 258 | txrx_ast_free_cb cb = ast->callback; |
| 259 | void *cookie = ast->cookie; |
Kiran Venkatappa | ed35f44 | 2018-07-19 22:22:29 +0530 | [diff] [blame] | 260 | |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 261 | /* Call the callbacks to free up the cookie */ |
| 262 | if (cb) { |
| 263 | ast->callback = NULL; |
| 264 | ast->cookie = NULL; |
| 265 | cb(soc->ctrl_psoc, |
Akshay Kosigi | a870c61 | 2019-07-08 23:10:30 +0530 | [diff] [blame] | 266 | dp_soc_to_cdp_soc(soc), |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 267 | cookie, |
| 268 | CDP_TXRX_AST_DELETE_IN_PROGRESS); |
| 269 | } |
Kiran Venkatappa | ed35f44 | 2018-07-19 22:22:29 +0530 | [diff] [blame] | 270 | } |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 271 | |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 272 | /* |
| 273 | * dp_peer_ast_hash_detach() - Free AST Hash table |
| 274 | * @soc: SoC handle |
| 275 | * |
| 276 | * Return: None |
| 277 | */ |
| 278 | static void dp_peer_ast_hash_detach(struct dp_soc *soc) |
| 279 | { |
Chaithanya Garrepalli | 157543d | 2018-07-09 17:42:59 +0530 | [diff] [blame] | 280 | unsigned int index; |
| 281 | struct dp_ast_entry *ast, *ast_next; |
| 282 | |
| 283 | if (!soc->ast_hash.mask) |
| 284 | return; |
| 285 | |
phadiman | b100750 | 2019-04-03 15:21:53 +0530 | [diff] [blame] | 286 | if (!soc->ast_hash.bins) |
| 287 | return; |
| 288 | |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 289 | qdf_spin_lock_bh(&soc->ast_lock); |
Chaithanya Garrepalli | 157543d | 2018-07-09 17:42:59 +0530 | [diff] [blame] | 290 | for (index = 0; index <= soc->ast_hash.mask; index++) { |
| 291 | if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) { |
| 292 | TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index], |
| 293 | hash_list_elem, ast_next) { |
| 294 | TAILQ_REMOVE(&soc->ast_hash.bins[index], ast, |
| 295 | hash_list_elem); |
| 296 | dp_peer_ast_cleanup(soc, ast); |
| 297 | qdf_mem_free(ast); |
| 298 | } |
| 299 | } |
| 300 | } |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 301 | qdf_spin_unlock_bh(&soc->ast_lock); |
Chaithanya Garrepalli | 157543d | 2018-07-09 17:42:59 +0530 | [diff] [blame] | 302 | |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 303 | qdf_mem_free(soc->ast_hash.bins); |
phadiman | b100750 | 2019-04-03 15:21:53 +0530 | [diff] [blame] | 304 | soc->ast_hash.bins = NULL; |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 305 | } |
| 306 | |
| 307 | /* |
| 308 | * dp_peer_ast_hash_index() - Compute the AST hash from MAC address |
| 309 | * @soc: SoC handle |
| 310 | * |
| 311 | * Return: AST hash |
| 312 | */ |
| 313 | static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc, |
| 314 | union dp_align_mac_addr *mac_addr) |
| 315 | { |
| 316 | uint32_t index; |
| 317 | |
| 318 | index = |
| 319 | mac_addr->align2.bytes_ab ^ |
| 320 | mac_addr->align2.bytes_cd ^ |
| 321 | mac_addr->align2.bytes_ef; |
| 322 | index ^= index >> soc->ast_hash.idx_bits; |
| 323 | index &= soc->ast_hash.mask; |
| 324 | return index; |
| 325 | } |
| 326 | |
| 327 | /* |
| 328 | * dp_peer_ast_hash_add() - Add AST entry into hash table |
| 329 | * @soc: SoC handle |
| 330 | * |
| 331 | * This function adds the AST entry into SoC AST hash table |
| 332 | * It assumes caller has taken the ast lock to protect the access to this table |
| 333 | * |
| 334 | * Return: None |
| 335 | */ |
| 336 | static inline void dp_peer_ast_hash_add(struct dp_soc *soc, |
| 337 | struct dp_ast_entry *ase) |
| 338 | { |
| 339 | uint32_t index; |
| 340 | |
| 341 | index = dp_peer_ast_hash_index(soc, &ase->mac_addr); |
| 342 | TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem); |
| 343 | } |
| 344 | |
| 345 | /* |
| 346 | * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table |
| 347 | * @soc: SoC handle |
| 348 | * |
| 349 | * This function removes the AST entry from soc AST hash table |
| 350 | * It assumes caller has taken the ast lock to protect the access to this table |
| 351 | * |
| 352 | * Return: None |
| 353 | */ |
Pavankumar Nandeshwar | 1ab908e | 2019-01-24 12:53:13 +0530 | [diff] [blame] | 354 | void dp_peer_ast_hash_remove(struct dp_soc *soc, |
| 355 | struct dp_ast_entry *ase) |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 356 | { |
| 357 | unsigned index; |
| 358 | struct dp_ast_entry *tmpase; |
| 359 | int found = 0; |
| 360 | |
| 361 | index = dp_peer_ast_hash_index(soc, &ase->mac_addr); |
| 362 | /* Check if tail is not empty before delete*/ |
| 363 | QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index])); |
| 364 | |
| 365 | TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) { |
| 366 | if (tmpase == ase) { |
| 367 | found = 1; |
| 368 | break; |
| 369 | } |
| 370 | } |
| 371 | |
| 372 | QDF_ASSERT(found); |
| 373 | TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem); |
| 374 | } |
| 375 | |
| 376 | /* |
Chaithanya Garrepalli | 7c8cf12 | 2018-09-07 19:23:52 +0530 | [diff] [blame] | 377 | * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list |
| 378 | * @soc: SoC handle |
| 379 | * @peer: peer handle |
| 380 | * @ast_mac_addr: mac address |
| 381 | * |
| 382 | * It assumes caller has taken the ast lock to protect the access to ast list |
| 383 | * |
| 384 | * Return: AST entry |
| 385 | */ |
| 386 | struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc, |
| 387 | struct dp_peer *peer, |
| 388 | uint8_t *ast_mac_addr) |
| 389 | { |
| 390 | struct dp_ast_entry *ast_entry = NULL; |
| 391 | union dp_align_mac_addr *mac_addr = |
| 392 | (union dp_align_mac_addr *)ast_mac_addr; |
| 393 | |
| 394 | TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) { |
| 395 | if (!dp_peer_find_mac_addr_cmp(mac_addr, |
| 396 | &ast_entry->mac_addr)) { |
| 397 | return ast_entry; |
| 398 | } |
| 399 | } |
| 400 | |
| 401 | return NULL; |
| 402 | } |
| 403 | |
| 404 | /* |
Chaithanya Garrepalli | d203e2d | 2018-09-18 14:23:17 +0530 | [diff] [blame] | 405 | * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address |
Chaithanya Garrepalli | d203e2d | 2018-09-18 14:23:17 +0530 | [diff] [blame] | 406 | * @soc: SoC handle |
Chaithanya Garrepalli | d203e2d | 2018-09-18 14:23:17 +0530 | [diff] [blame] | 407 | * |
| 408 | * It assumes caller has taken the ast lock to protect the access to |
| 409 | * AST hash table |
| 410 | * |
| 411 | * Return: AST entry |
| 412 | */ |
| 413 | struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc, |
| 414 | uint8_t *ast_mac_addr, |
| 415 | uint8_t pdev_id) |
| 416 | { |
| 417 | union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; |
| 418 | uint32_t index; |
| 419 | struct dp_ast_entry *ase; |
| 420 | |
| 421 | qdf_mem_copy(&local_mac_addr_aligned.raw[0], |
Srinivas Girigowda | 2751b6d | 2019-02-27 12:28:13 -0800 | [diff] [blame] | 422 | ast_mac_addr, QDF_MAC_ADDR_SIZE); |
Chaithanya Garrepalli | d203e2d | 2018-09-18 14:23:17 +0530 | [diff] [blame] | 423 | mac_addr = &local_mac_addr_aligned; |
| 424 | |
| 425 | index = dp_peer_ast_hash_index(soc, mac_addr); |
| 426 | TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) { |
| 427 | if ((pdev_id == ase->pdev_id) && |
| 428 | !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) { |
| 429 | return ase; |
| 430 | } |
| 431 | } |
| 432 | |
| 433 | return NULL; |
| 434 | } |
| 435 | |
| 436 | /* |
Chaithanya Garrepalli | cf347d1 | 2018-09-18 14:28:55 +0530 | [diff] [blame] | 437 | * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 438 | * @soc: SoC handle |
| 439 | * |
| 440 | * It assumes caller has taken the ast lock to protect the access to |
| 441 | * AST hash table |
| 442 | * |
| 443 | * Return: AST entry |
| 444 | */ |
Chaithanya Garrepalli | cf347d1 | 2018-09-18 14:28:55 +0530 | [diff] [blame] | 445 | struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc, |
| 446 | uint8_t *ast_mac_addr) |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 447 | { |
| 448 | union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; |
| 449 | unsigned index; |
| 450 | struct dp_ast_entry *ase; |
| 451 | |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 452 | qdf_mem_copy(&local_mac_addr_aligned.raw[0], |
Srinivas Girigowda | 2751b6d | 2019-02-27 12:28:13 -0800 | [diff] [blame] | 453 | ast_mac_addr, QDF_MAC_ADDR_SIZE); |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 454 | mac_addr = &local_mac_addr_aligned; |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 455 | |
| 456 | index = dp_peer_ast_hash_index(soc, mac_addr); |
| 457 | TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) { |
| 458 | if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) { |
| 459 | return ase; |
| 460 | } |
| 461 | } |
| 462 | |
| 463 | return NULL; |
| 464 | } |
| 465 | |
| 466 | /* |
| 467 | * dp_peer_map_ast() - Map the ast entry with HW AST Index |
| 468 | * @soc: SoC handle |
| 469 | * @peer: peer to which ast node belongs |
| 470 | * @mac_addr: MAC address of ast node |
| 471 | * @hw_peer_id: HW AST Index returned by target in peer map event |
| 472 | * @vdev_id: vdev id for VAP to which the peer belongs to |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 473 | * @ast_hash: ast hash value in HW |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 474 | * |
| 475 | * Return: None |
| 476 | */ |
| 477 | static inline void dp_peer_map_ast(struct dp_soc *soc, |
| 478 | struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id, |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 479 | uint8_t vdev_id, uint16_t ast_hash) |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 480 | { |
Chaithanya Garrepalli | 7c8cf12 | 2018-09-07 19:23:52 +0530 | [diff] [blame] | 481 | struct dp_ast_entry *ast_entry = NULL; |
Chandru Neginahal | 2a4e5d2 | 2017-11-08 12:20:49 +0530 | [diff] [blame] | 482 | enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC; |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 483 | |
| 484 | if (!peer) { |
| 485 | return; |
| 486 | } |
| 487 | |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 488 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
Shivani Soni | a5707a4 | 2020-01-08 16:42:08 +0530 | [diff] [blame] | 489 | "%s: peer %pK ID %d vid %d mac %pM", |
| 490 | __func__, peer, hw_peer_id, vdev_id, mac_addr); |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 491 | |
| 492 | qdf_spin_lock_bh(&soc->ast_lock); |
Chaithanya Garrepalli | 7c8cf12 | 2018-09-07 19:23:52 +0530 | [diff] [blame] | 493 | |
| 494 | ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr); |
| 495 | |
| 496 | if (ast_entry) { |
| 497 | ast_entry->ast_idx = hw_peer_id; |
| 498 | soc->ast_table[hw_peer_id] = ast_entry; |
| 499 | ast_entry->is_active = TRUE; |
| 500 | peer_type = ast_entry->type; |
| 501 | ast_entry->ast_hash_value = ast_hash; |
Chaithanya Garrepalli | e10f87b | 2018-10-18 00:14:11 +0530 | [diff] [blame] | 502 | ast_entry->is_mapped = TRUE; |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 503 | } |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 504 | |
Chaithanya Garrepalli | 7c8cf12 | 2018-09-07 19:23:52 +0530 | [diff] [blame] | 505 | if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) { |
Chandru Neginahal | 2a4e5d2 | 2017-11-08 12:20:49 +0530 | [diff] [blame] | 506 | if (soc->cdp_soc.ol_ops->peer_map_event) { |
| 507 | soc->cdp_soc.ol_ops->peer_map_event( |
Sathyanarayanan Esakkiappan | 38c6f98 | 2017-12-05 12:00:31 +0530 | [diff] [blame] | 508 | soc->ctrl_psoc, peer->peer_ids[0], |
Chandru Neginahal | 2a4e5d2 | 2017-11-08 12:20:49 +0530 | [diff] [blame] | 509 | hw_peer_id, vdev_id, |
Radha krishna Simha Jiguru | d359eb4 | 2018-09-16 13:56:34 +0530 | [diff] [blame] | 510 | mac_addr, peer_type, ast_hash); |
Chandru Neginahal | 2a4e5d2 | 2017-11-08 12:20:49 +0530 | [diff] [blame] | 511 | } |
| 512 | } else { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 513 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 514 | "AST entry not found"); |
Chandru Neginahal | 2a4e5d2 | 2017-11-08 12:20:49 +0530 | [diff] [blame] | 515 | } |
| 516 | |
| 517 | qdf_spin_unlock_bh(&soc->ast_lock); |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 518 | return; |
| 519 | } |
| 520 | |
Akshay Kosigi | eec6db9 | 2019-07-02 14:25:54 +0530 | [diff] [blame] | 521 | void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, |
Akshay Kosigi | 4002f76 | 2019-07-08 23:04:36 +0530 | [diff] [blame] | 522 | struct cdp_soc *dp_soc, |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 523 | void *cookie, |
| 524 | enum cdp_ast_free_status status) |
Kiran Venkatappa | 74e6d8b | 2018-11-05 15:02:29 +0530 | [diff] [blame] | 525 | { |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 526 | struct dp_ast_free_cb_params *param = |
| 527 | (struct dp_ast_free_cb_params *)cookie; |
| 528 | struct dp_soc *soc = (struct dp_soc *)dp_soc; |
| 529 | struct dp_peer *peer = NULL; |
| 530 | |
| 531 | if (status != CDP_TXRX_AST_DELETED) { |
| 532 | qdf_mem_free(cookie); |
| 533 | return; |
| 534 | } |
| 535 | |
| 536 | peer = dp_peer_find_hash_find(soc, ¶m->peer_mac_addr.raw[0], |
| 537 | 0, param->vdev_id); |
| 538 | if (peer) { |
| 539 | dp_peer_add_ast(soc, peer, |
| 540 | ¶m->mac_addr.raw[0], |
| 541 | param->type, |
| 542 | param->flags); |
| 543 | dp_peer_unref_delete(peer); |
| 544 | } |
| 545 | qdf_mem_free(cookie); |
Kiran Venkatappa | 74e6d8b | 2018-11-05 15:02:29 +0530 | [diff] [blame] | 546 | } |
Kiran Venkatappa | 74e6d8b | 2018-11-05 15:02:29 +0530 | [diff] [blame] | 547 | |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 548 | /* |
| 549 | * dp_peer_add_ast() - Allocate and add AST entry into peer list |
| 550 | * @soc: SoC handle |
| 551 | * @peer: peer to which ast node belongs |
| 552 | * @mac_addr: MAC address of ast node |
| 553 | * @is_self: Is this base AST entry with peer mac address |
| 554 | * |
Jeff Johnson | bd6e61f | 2018-05-06 17:11:15 -0700 | [diff] [blame] | 555 | * This API is used by WDS source port learning function to |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 556 | * add a new AST entry into peer AST list |
| 557 | * |
| 558 | * Return: 0 if new entry is allocated, |
Ruchi, Agrawal | d6ba7ae | 2018-02-23 16:54:58 +0530 | [diff] [blame] | 559 | * -1 if entry add failed |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 560 | */ |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 561 | int dp_peer_add_ast(struct dp_soc *soc, |
| 562 | struct dp_peer *peer, |
| 563 | uint8_t *mac_addr, |
| 564 | enum cdp_txrx_ast_entry_type type, |
| 565 | uint32_t flags) |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 566 | { |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 567 | struct dp_ast_entry *ast_entry = NULL; |
Prathyusha Guduri | bd4fd7a | 2019-10-01 19:29:20 +0530 | [diff] [blame] | 568 | struct dp_vdev *vdev = NULL, *tmp_vdev = NULL; |
Chaithanya Garrepalli | d203e2d | 2018-09-18 14:23:17 +0530 | [diff] [blame] | 569 | struct dp_pdev *pdev = NULL; |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 570 | uint8_t next_node_mac[6]; |
Ruchi, Agrawal | d6ba7ae | 2018-02-23 16:54:58 +0530 | [diff] [blame] | 571 | int ret = -1; |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 572 | txrx_ast_free_cb cb = NULL; |
| 573 | void *cookie = NULL; |
Chaithanya Garrepalli | 09837d2 | 2019-09-09 15:01:10 +0530 | [diff] [blame] | 574 | struct dp_peer *tmp_peer = NULL; |
Chaithanya Garrepalli | cf0b4e2 | 2019-09-21 23:01:21 +0530 | [diff] [blame] | 575 | bool is_peer_found = false; |
| 576 | |
Prathyusha Guduri | bd4fd7a | 2019-10-01 19:29:20 +0530 | [diff] [blame] | 577 | vdev = peer->vdev; |
| 578 | if (!vdev) { |
| 579 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 580 | FL("Peers vdev is NULL")); |
| 581 | QDF_ASSERT(0); |
| 582 | return ret; |
| 583 | } |
| 584 | |
| 585 | pdev = vdev->pdev; |
| 586 | |
Chaithanya Garrepalli | cf0b4e2 | 2019-09-21 23:01:21 +0530 | [diff] [blame] | 587 | tmp_peer = dp_peer_find_hash_find(soc, mac_addr, 0, |
| 588 | DP_VDEV_ALL); |
| 589 | if (tmp_peer) { |
Prathyusha Guduri | bd4fd7a | 2019-10-01 19:29:20 +0530 | [diff] [blame] | 590 | tmp_vdev = tmp_peer->vdev; |
| 591 | if (!tmp_vdev) { |
| 592 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 593 | FL("Peers vdev is NULL")); |
| 594 | QDF_ASSERT(0); |
| 595 | dp_peer_unref_delete(tmp_peer); |
| 596 | return ret; |
| 597 | } |
| 598 | if (tmp_vdev->pdev->pdev_id == pdev->pdev_id) |
| 599 | is_peer_found = true; |
| 600 | |
Chaithanya Garrepalli | cf0b4e2 | 2019-09-21 23:01:21 +0530 | [diff] [blame] | 601 | dp_peer_unref_delete(tmp_peer); |
Chaithanya Garrepalli | cf0b4e2 | 2019-09-21 23:01:21 +0530 | [diff] [blame] | 602 | } |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 603 | |
Chaithanya Garrepalli | 8fb4877 | 2019-01-21 23:11:18 +0530 | [diff] [blame] | 604 | qdf_spin_lock_bh(&soc->ast_lock); |
| 605 | if (peer->delete_in_progress) { |
| 606 | qdf_spin_unlock_bh(&soc->ast_lock); |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 607 | return ret; |
Chaithanya Garrepalli | 8fb4877 | 2019-01-21 23:11:18 +0530 | [diff] [blame] | 608 | } |
Ruchi, Agrawal | 93bcf12 | 2018-10-26 13:56:34 +0530 | [diff] [blame] | 609 | |
phadiman | d2e88e3 | 2019-01-23 12:58:43 +0530 | [diff] [blame] | 610 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
phadiman | e9fb547 | 2018-10-30 16:53:05 +0530 | [diff] [blame] | 611 | "%s: pdevid: %u vdev: %u ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM", |
| 612 | __func__, pdev->pdev_id, vdev->vdev_id, type, flags, |
| 613 | peer->mac_addr.raw, peer, mac_addr); |
| 614 | |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 615 | |
Tallapragada Kalyan | a702362 | 2018-12-03 19:29:52 +0530 | [diff] [blame] | 616 | /* fw supports only 2 times the max_peers ast entries */ |
| 617 | if (soc->num_ast_entries >= |
| 618 | wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) { |
| 619 | qdf_spin_unlock_bh(&soc->ast_lock); |
| 620 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 621 | FL("Max ast entries reached")); |
| 622 | return ret; |
| 623 | } |
| 624 | |
Chaithanya Garrepalli | d203e2d | 2018-09-18 14:23:17 +0530 | [diff] [blame] | 625 | /* If AST entry already exists , just return from here |
| 626 | * ast entry with same mac address can exist on different radios |
| 627 | * if ast_override support is enabled use search by pdev in this |
| 628 | * case |
| 629 | */ |
| 630 | if (soc->ast_override_support) { |
| 631 | ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, |
| 632 | pdev->pdev_id); |
| 633 | if (ast_entry) { |
Tallapragada Kalyan | 9e4b36f | 2019-05-02 13:22:34 +0530 | [diff] [blame] | 634 | if ((type == CDP_TXRX_AST_TYPE_MEC) && |
| 635 | (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)) |
| 636 | ast_entry->is_active = TRUE; |
| 637 | |
Pamidipati, Vijay | 13f5ec2 | 2018-08-06 17:34:21 +0530 | [diff] [blame] | 638 | qdf_spin_unlock_bh(&soc->ast_lock); |
| 639 | return 0; |
| 640 | } |
Chaithanya Garrepalli | cf0b4e2 | 2019-09-21 23:01:21 +0530 | [diff] [blame] | 641 | if (is_peer_found) { |
Rathees kumar Chinannan | e03a81b | 2019-10-10 15:00:21 +0530 | [diff] [blame] | 642 | /* During WDS to static roaming, peer is added |
| 643 | * to the list before static AST entry create. |
| 644 | * So, allow AST entry for STATIC type |
| 645 | * even if peer is present |
| 646 | */ |
| 647 | if (type != CDP_TXRX_AST_TYPE_STATIC) { |
| 648 | qdf_spin_unlock_bh(&soc->ast_lock); |
| 649 | return 0; |
| 650 | } |
Chaithanya Garrepalli | 09837d2 | 2019-09-09 15:01:10 +0530 | [diff] [blame] | 651 | } |
Chaithanya Garrepalli | d203e2d | 2018-09-18 14:23:17 +0530 | [diff] [blame] | 652 | } else { |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 653 | /* For HWMWDS_SEC entries can be added for same mac address |
| 654 | * do not check for existing entry |
| 655 | */ |
| 656 | if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC) |
| 657 | goto add_ast_entry; |
| 658 | |
Chaithanya Garrepalli | cf347d1 | 2018-09-18 14:28:55 +0530 | [diff] [blame] | 659 | ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr); |
Pamidipati, Vijay | 13f5ec2 | 2018-08-06 17:34:21 +0530 | [diff] [blame] | 660 | |
Chaithanya Garrepalli | d203e2d | 2018-09-18 14:23:17 +0530 | [diff] [blame] | 661 | if (ast_entry) { |
Pamidipati, Vijay | b113bbc | 2019-01-22 22:06:36 +0530 | [diff] [blame] | 662 | if ((type == CDP_TXRX_AST_TYPE_MEC) && |
| 663 | (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)) |
Chaithanya Garrepalli | d203e2d | 2018-09-18 14:23:17 +0530 | [diff] [blame] | 664 | ast_entry->is_active = TRUE; |
Chaithanya Garrepalli | d203e2d | 2018-09-18 14:23:17 +0530 | [diff] [blame] | 665 | |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 666 | if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) && |
| 667 | !ast_entry->delete_in_progress) { |
| 668 | qdf_spin_unlock_bh(&soc->ast_lock); |
| 669 | return 0; |
| 670 | } |
| 671 | |
| 672 | /* Add for HMWDS entry we cannot be ignored if there |
| 673 | * is AST entry with same mac address |
| 674 | * |
| 675 | * if ast entry exists with the requested mac address |
| 676 | * send a delete command and register callback which |
| 677 | * can take care of adding HMWDS ast enty on delete |
| 678 | * confirmation from target |
| 679 | */ |
Radha Krishna Simha Jiguru | 64b4848 | 2019-12-23 17:09:41 +0530 | [diff] [blame] | 680 | if (type == CDP_TXRX_AST_TYPE_WDS_HM) { |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 681 | struct dp_ast_free_cb_params *param = NULL; |
| 682 | |
| 683 | if (ast_entry->type == |
| 684 | CDP_TXRX_AST_TYPE_WDS_HM_SEC) |
| 685 | goto add_ast_entry; |
| 686 | |
| 687 | /* save existing callback */ |
| 688 | if (ast_entry->callback) { |
| 689 | cb = ast_entry->callback; |
| 690 | cookie = ast_entry->cookie; |
| 691 | } |
| 692 | |
| 693 | param = qdf_mem_malloc(sizeof(*param)); |
| 694 | if (!param) { |
| 695 | QDF_TRACE(QDF_MODULE_ID_TXRX, |
| 696 | QDF_TRACE_LEVEL_ERROR, |
| 697 | "Allocation failed"); |
| 698 | qdf_spin_unlock_bh(&soc->ast_lock); |
| 699 | return ret; |
| 700 | } |
| 701 | |
| 702 | qdf_mem_copy(¶m->mac_addr.raw[0], mac_addr, |
Srinivas Girigowda | 2751b6d | 2019-02-27 12:28:13 -0800 | [diff] [blame] | 703 | QDF_MAC_ADDR_SIZE); |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 704 | qdf_mem_copy(¶m->peer_mac_addr.raw[0], |
| 705 | &peer->mac_addr.raw[0], |
Srinivas Girigowda | 2751b6d | 2019-02-27 12:28:13 -0800 | [diff] [blame] | 706 | QDF_MAC_ADDR_SIZE); |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 707 | param->type = type; |
| 708 | param->flags = flags; |
| 709 | param->vdev_id = vdev->vdev_id; |
| 710 | ast_entry->callback = dp_peer_free_hmwds_cb; |
Chaithanya Garrepalli | 4fd2fe4 | 2019-02-19 23:48:21 +0530 | [diff] [blame] | 711 | ast_entry->pdev_id = vdev->pdev->pdev_id; |
| 712 | ast_entry->type = type; |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 713 | ast_entry->cookie = (void *)param; |
| 714 | if (!ast_entry->delete_in_progress) |
| 715 | dp_peer_del_ast(soc, ast_entry); |
| 716 | } |
| 717 | |
Sathyanarayanan Esakkiappan | 4af5584 | 2018-10-23 12:58:07 +0530 | [diff] [blame] | 718 | /* Modify an already existing AST entry from type |
| 719 | * WDS to MEC on promption. This serves as a fix when |
| 720 | * backbone of interfaces are interchanged wherein |
Nandha Kishore Easwaran | 8dd440d | 2018-11-30 15:02:20 +0530 | [diff] [blame] | 721 | * wds entr becomes its own MEC. The entry should be |
| 722 | * replaced only when the ast_entry peer matches the |
| 723 | * peer received in mec event. This additional check |
| 724 | * is needed in wds repeater cases where a multicast |
| 725 | * packet from station to the root via the repeater |
| 726 | * should not remove the wds entry. |
Sathyanarayanan Esakkiappan | 4af5584 | 2018-10-23 12:58:07 +0530 | [diff] [blame] | 727 | */ |
| 728 | if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) && |
Nandha Kishore Easwaran | 8dd440d | 2018-11-30 15:02:20 +0530 | [diff] [blame] | 729 | (type == CDP_TXRX_AST_TYPE_MEC) && |
| 730 | (ast_entry->peer == peer)) { |
Sathyanarayanan Esakkiappan | 4af5584 | 2018-10-23 12:58:07 +0530 | [diff] [blame] | 731 | ast_entry->is_active = FALSE; |
| 732 | dp_peer_del_ast(soc, ast_entry); |
| 733 | } |
Chaithanya Garrepalli | d203e2d | 2018-09-18 14:23:17 +0530 | [diff] [blame] | 734 | qdf_spin_unlock_bh(&soc->ast_lock); |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 735 | |
| 736 | /* Call the saved callback*/ |
| 737 | if (cb) { |
Akshay Kosigi | 4002f76 | 2019-07-08 23:04:36 +0530 | [diff] [blame] | 738 | cb(soc->ctrl_psoc, |
Akshay Kosigi | a870c61 | 2019-07-08 23:10:30 +0530 | [diff] [blame] | 739 | dp_soc_to_cdp_soc(soc), |
Akshay Kosigi | 4002f76 | 2019-07-08 23:04:36 +0530 | [diff] [blame] | 740 | cookie, |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 741 | CDP_TXRX_AST_DELETE_IN_PROGRESS); |
| 742 | } |
Chaithanya Garrepalli | d203e2d | 2018-09-18 14:23:17 +0530 | [diff] [blame] | 743 | return 0; |
Pamidipati, Vijay | 13f5ec2 | 2018-08-06 17:34:21 +0530 | [diff] [blame] | 744 | } |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 745 | } |
| 746 | |
Tallapragada Kalyan | 5e3a39c | 2018-08-24 16:34:12 +0530 | [diff] [blame] | 747 | add_ast_entry: |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 748 | ast_entry = (struct dp_ast_entry *) |
| 749 | qdf_mem_malloc(sizeof(struct dp_ast_entry)); |
| 750 | |
| 751 | if (!ast_entry) { |
| 752 | qdf_spin_unlock_bh(&soc->ast_lock); |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 753 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 754 | FL("fail to allocate ast_entry")); |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 755 | QDF_ASSERT(0); |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 756 | return ret; |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 757 | } |
| 758 | |
Srinivas Girigowda | 2751b6d | 2019-02-27 12:28:13 -0800 | [diff] [blame] | 759 | qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE); |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 760 | ast_entry->pdev_id = vdev->pdev->pdev_id; |
Chaithanya Garrepalli | e10f87b | 2018-10-18 00:14:11 +0530 | [diff] [blame] | 761 | ast_entry->is_mapped = false; |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 762 | ast_entry->delete_in_progress = false; |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 763 | |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 764 | switch (type) { |
| 765 | case CDP_TXRX_AST_TYPE_STATIC: |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 766 | peer->self_ast_entry = ast_entry; |
Radha krishna Simha Jiguru | f70f991 | 2017-08-02 18:32:22 +0530 | [diff] [blame] | 767 | ast_entry->type = CDP_TXRX_AST_TYPE_STATIC; |
Radha krishna Simha Jiguru | 2734079 | 2018-09-06 15:08:12 +0530 | [diff] [blame] | 768 | if (peer->vdev->opmode == wlan_op_mode_sta) |
| 769 | ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS; |
Tallapragada Kalyan | fb72b63 | 2017-07-07 12:51:58 +0530 | [diff] [blame] | 770 | break; |
Pamidipati, Vijay | 3756b76 | 2018-05-12 11:10:37 +0530 | [diff] [blame] | 771 | case CDP_TXRX_AST_TYPE_SELF: |
| 772 | peer->self_ast_entry = ast_entry; |
| 773 | ast_entry->type = CDP_TXRX_AST_TYPE_SELF; |
| 774 | break; |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 775 | case CDP_TXRX_AST_TYPE_WDS: |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 776 | ast_entry->next_hop = 1; |
Radha krishna Simha Jiguru | f70f991 | 2017-08-02 18:32:22 +0530 | [diff] [blame] | 777 | ast_entry->type = CDP_TXRX_AST_TYPE_WDS; |
Tallapragada Kalyan | fb72b63 | 2017-07-07 12:51:58 +0530 | [diff] [blame] | 778 | break; |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 779 | case CDP_TXRX_AST_TYPE_WDS_HM: |
| 780 | ast_entry->next_hop = 1; |
| 781 | ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM; |
| 782 | break; |
Tallapragada Kalyan | 5e3a39c | 2018-08-24 16:34:12 +0530 | [diff] [blame] | 783 | case CDP_TXRX_AST_TYPE_WDS_HM_SEC: |
| 784 | ast_entry->next_hop = 1; |
| 785 | ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC; |
| 786 | break; |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 787 | case CDP_TXRX_AST_TYPE_MEC: |
Tallapragada Kalyan | fb72b63 | 2017-07-07 12:51:58 +0530 | [diff] [blame] | 788 | ast_entry->next_hop = 1; |
Radha krishna Simha Jiguru | f70f991 | 2017-08-02 18:32:22 +0530 | [diff] [blame] | 789 | ast_entry->type = CDP_TXRX_AST_TYPE_MEC; |
Tallapragada Kalyan | fb72b63 | 2017-07-07 12:51:58 +0530 | [diff] [blame] | 790 | break; |
Tallapragada Kalyan | 2ae71e0 | 2018-08-31 19:30:54 +0530 | [diff] [blame] | 791 | case CDP_TXRX_AST_TYPE_DA: |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 792 | peer = peer->vdev->vap_bss_peer; |
Tallapragada Kalyan | 2ae71e0 | 2018-08-31 19:30:54 +0530 | [diff] [blame] | 793 | ast_entry->next_hop = 1; |
| 794 | ast_entry->type = CDP_TXRX_AST_TYPE_DA; |
| 795 | break; |
Tallapragada Kalyan | fb72b63 | 2017-07-07 12:51:58 +0530 | [diff] [blame] | 796 | default: |
| 797 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 798 | FL("Incorrect AST entry type")); |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 799 | } |
| 800 | |
| 801 | ast_entry->is_active = TRUE; |
Pamidipati, Vijay | 899e775 | 2017-07-25 22:09:28 +0530 | [diff] [blame] | 802 | DP_STATS_INC(soc, ast.added, 1); |
Tallapragada Kalyan | a702362 | 2018-12-03 19:29:52 +0530 | [diff] [blame] | 803 | soc->num_ast_entries++; |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 804 | dp_peer_ast_hash_add(soc, ast_entry); |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 805 | |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 806 | ast_entry->peer = peer; |
| 807 | |
| 808 | if (type == CDP_TXRX_AST_TYPE_MEC) |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 809 | qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6); |
Ruchi, Agrawal | d536f88 | 2018-03-02 15:51:23 +0530 | [diff] [blame] | 810 | else |
| 811 | qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6); |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 812 | |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 813 | TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem); |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 814 | |
Pamidipati, Vijay | 3756b76 | 2018-05-12 11:10:37 +0530 | [diff] [blame] | 815 | if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) && |
Radha krishna Simha Jiguru | 2734079 | 2018-09-06 15:08:12 +0530 | [diff] [blame] | 816 | (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) && |
Tallapragada Kalyan | 5e3a39c | 2018-08-24 16:34:12 +0530 | [diff] [blame] | 817 | (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) && |
| 818 | (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) { |
Ruchi, Agrawal | d6ba7ae | 2018-02-23 16:54:58 +0530 | [diff] [blame] | 819 | if (QDF_STATUS_SUCCESS == |
| 820 | soc->cdp_soc.ol_ops->peer_add_wds_entry( |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 821 | soc->ctrl_psoc, |
| 822 | peer->vdev->vdev_id, |
| 823 | peer->mac_addr.raw, |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 824 | mac_addr, |
| 825 | next_node_mac, |
Subhranil Choudhury | 22434e5 | 2020-01-13 16:21:34 +0530 | [diff] [blame] | 826 | flags, |
| 827 | ast_entry->type)) { |
Chaithanya Garrepalli | 58e7c5e | 2019-04-02 16:55:16 +0530 | [diff] [blame] | 828 | qdf_spin_unlock_bh(&soc->ast_lock); |
Ruchi, Agrawal | d6ba7ae | 2018-02-23 16:54:58 +0530 | [diff] [blame] | 829 | return 0; |
Chaithanya Garrepalli | 58e7c5e | 2019-04-02 16:55:16 +0530 | [diff] [blame] | 830 | } |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 831 | } |
| 832 | |
Chaithanya Garrepalli | 58e7c5e | 2019-04-02 16:55:16 +0530 | [diff] [blame] | 833 | qdf_spin_unlock_bh(&soc->ast_lock); |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 834 | return ret; |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 835 | } |
| 836 | |
| 837 | /* |
Radha Krishna Simha Jiguru | 64b4848 | 2019-12-23 17:09:41 +0530 | [diff] [blame] | 838 | * dp_peer_free_ast_entry() - Free up the ast entry memory |
| 839 | * @soc: SoC handle |
| 840 | * @ast_entry: Address search entry |
| 841 | * |
| 842 | * This API is used to free up the memory associated with |
| 843 | * AST entry. |
| 844 | * |
| 845 | * Return: None |
| 846 | */ |
| 847 | void dp_peer_free_ast_entry(struct dp_soc *soc, |
| 848 | struct dp_ast_entry *ast_entry) |
| 849 | { |
| 850 | /* |
| 851 | * NOTE: Ensure that call to this API is done |
| 852 | * after soc->ast_lock is taken |
| 853 | */ |
| 854 | ast_entry->callback = NULL; |
| 855 | ast_entry->cookie = NULL; |
| 856 | |
| 857 | DP_STATS_INC(soc, ast.deleted, 1); |
| 858 | dp_peer_ast_hash_remove(soc, ast_entry); |
| 859 | dp_peer_ast_cleanup(soc, ast_entry); |
| 860 | qdf_mem_free(ast_entry); |
| 861 | soc->num_ast_entries--; |
| 862 | } |
| 863 | |
| 864 | /* |
| 865 | * dp_peer_unlink_ast_entry() - Free up the ast entry memory |
| 866 | * @soc: SoC handle |
| 867 | * @ast_entry: Address search entry |
| 868 | * |
| 869 | * This API is used to remove/unlink AST entry from the peer list |
| 870 | * and hash list. |
| 871 | * |
| 872 | * Return: None |
| 873 | */ |
| 874 | void dp_peer_unlink_ast_entry(struct dp_soc *soc, |
| 875 | struct dp_ast_entry *ast_entry) |
| 876 | { |
| 877 | /* |
| 878 | * NOTE: Ensure that call to this API is done |
| 879 | * after soc->ast_lock is taken |
| 880 | */ |
| 881 | struct dp_peer *peer = ast_entry->peer; |
| 882 | |
| 883 | TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem); |
| 884 | |
| 885 | if (ast_entry == peer->self_ast_entry) |
| 886 | peer->self_ast_entry = NULL; |
| 887 | |
| 888 | /* |
| 889 | * release the reference only if it is mapped |
| 890 | * to ast_table |
| 891 | */ |
| 892 | if (ast_entry->is_mapped) |
| 893 | soc->ast_table[ast_entry->ast_idx] = NULL; |
| 894 | |
| 895 | ast_entry->peer = NULL; |
| 896 | } |
| 897 | |
| 898 | /* |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 899 | * dp_peer_del_ast() - Delete and free AST entry |
| 900 | * @soc: SoC handle |
| 901 | * @ast_entry: AST entry of the node |
| 902 | * |
| 903 | * This function removes the AST entry from peer and soc tables |
| 904 | * It assumes caller has taken the ast lock to protect the access to these |
| 905 | * tables |
| 906 | * |
| 907 | * Return: None |
| 908 | */ |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 909 | void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry) |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 910 | { |
Pavankumar Nandeshwar | 1ab908e | 2019-01-24 12:53:13 +0530 | [diff] [blame] | 911 | struct dp_peer *peer; |
| 912 | |
| 913 | if (!ast_entry) |
| 914 | return; |
| 915 | |
Radha Krishna Simha Jiguru | 64b4848 | 2019-12-23 17:09:41 +0530 | [diff] [blame] | 916 | if (ast_entry->delete_in_progress) |
| 917 | return; |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 918 | |
Radha Krishna Simha Jiguru | 64b4848 | 2019-12-23 17:09:41 +0530 | [diff] [blame] | 919 | ast_entry->delete_in_progress = true; |
| 920 | |
| 921 | peer = ast_entry->peer; |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 922 | dp_peer_ast_send_wds_del(soc, ast_entry); |
| 923 | |
Radha Krishna Simha Jiguru | 64b4848 | 2019-12-23 17:09:41 +0530 | [diff] [blame] | 924 | /* Remove SELF and STATIC entries in teardown itself */ |
| 925 | if (!ast_entry->next_hop) |
| 926 | dp_peer_unlink_ast_entry(soc, ast_entry); |
| 927 | |
Tallapragada Kalyan | 9e4b36f | 2019-05-02 13:22:34 +0530 | [diff] [blame] | 928 | if (ast_entry->is_mapped) |
| 929 | soc->ast_table[ast_entry->ast_idx] = NULL; |
| 930 | |
Radha Krishna Simha Jiguru | 64b4848 | 2019-12-23 17:09:41 +0530 | [diff] [blame] | 931 | /* if peer map v2 is enabled we are not freeing ast entry |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 932 | * here and it is supposed to be freed in unmap event (after |
| 933 | * we receive delete confirmation from target) |
| 934 | * |
| 935 | * if peer_id is invalid we did not get the peer map event |
| 936 | * for the peer free ast entry from here only in this case |
| 937 | */ |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 938 | |
Radha Krishna Simha Jiguru | 64b4848 | 2019-12-23 17:09:41 +0530 | [diff] [blame] | 939 | /* For HM_SEC and SELF type we do not receive unmap event |
| 940 | * free ast_entry from here it self |
| 941 | */ |
| 942 | if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) && |
| 943 | (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) |
| 944 | return; |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 945 | |
Radha Krishna Simha Jiguru | 64b4848 | 2019-12-23 17:09:41 +0530 | [diff] [blame] | 946 | /* for WDS secondary entry ast_entry->next_hop would be set so |
| 947 | * unlinking has to be done explicitly here. |
| 948 | * As this entry is not a mapped entry unmap notification from |
| 949 | * FW wil not come. Hence unlinkling is done right here. |
| 950 | */ |
| 951 | if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC) |
| 952 | dp_peer_unlink_ast_entry(soc, ast_entry); |
Pamidipati, Vijay | 3eab5b1 | 2018-08-23 16:00:44 +0530 | [diff] [blame] | 953 | |
Radha Krishna Simha Jiguru | 64b4848 | 2019-12-23 17:09:41 +0530 | [diff] [blame] | 954 | dp_peer_free_ast_entry(soc, ast_entry); |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 955 | } |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 956 | |
| 957 | /* |
| 958 | * dp_peer_update_ast() - Delete and free AST entry |
| 959 | * @soc: SoC handle |
| 960 | * @peer: peer to which ast node belongs |
| 961 | * @ast_entry: AST entry of the node |
| 962 | * @flags: wds or hmwds |
| 963 | * |
| 964 | * This function update the AST entry to the roamed peer and soc tables |
| 965 | * It assumes caller has taken the ast lock to protect the access to these |
| 966 | * tables |
| 967 | * |
| 968 | * Return: 0 if ast entry is updated successfully |
Ruchi, Agrawal | d6ba7ae | 2018-02-23 16:54:58 +0530 | [diff] [blame] | 969 | * -1 failure |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 970 | */ |
| 971 | int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, |
| 972 | struct dp_ast_entry *ast_entry, uint32_t flags) |
| 973 | { |
Ruchi, Agrawal | d6ba7ae | 2018-02-23 16:54:58 +0530 | [diff] [blame] | 974 | int ret = -1; |
Tallapragada Kalyan | 7a47aac | 2018-02-28 22:01:59 +0530 | [diff] [blame] | 975 | struct dp_peer *old_peer; |
| 976 | |
phadiman | d2e88e3 | 2019-01-23 12:58:43 +0530 | [diff] [blame] | 977 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
phadiman | e9fb547 | 2018-10-30 16:53:05 +0530 | [diff] [blame] | 978 | "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n", |
| 979 | __func__, ast_entry->type, peer->vdev->pdev->pdev_id, |
| 980 | peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw, |
| 981 | peer->mac_addr.raw); |
| 982 | |
Chaithanya Garrepalli | 1a39da4 | 2019-06-10 12:27:59 +0530 | [diff] [blame] | 983 | /* Do not send AST update in below cases |
| 984 | * 1) Ast entry delete has already triggered |
| 985 | * 2) Peer delete is already triggered |
| 986 | * 3) We did not get the HTT map for create event |
| 987 | */ |
| 988 | if (ast_entry->delete_in_progress || peer->delete_in_progress || |
| 989 | !ast_entry->is_mapped) |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 990 | return ret; |
| 991 | |
Pamidipati, Vijay | 3756b76 | 2018-05-12 11:10:37 +0530 | [diff] [blame] | 992 | if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) || |
Tallapragada Kalyan | 5e3a39c | 2018-08-24 16:34:12 +0530 | [diff] [blame] | 993 | (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) || |
| 994 | (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) || |
| 995 | (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)) |
Pamidipati, Vijay | 3756b76 | 2018-05-12 11:10:37 +0530 | [diff] [blame] | 996 | return 0; |
Chaithanya Garrepalli | 4c7099f | 2018-03-23 12:20:18 +0530 | [diff] [blame] | 997 | |
syed touqeer pasha | 8a0928b | 2019-03-01 18:06:50 +0530 | [diff] [blame] | 998 | /* |
| 999 | * Avoids flood of WMI update messages sent to FW for same peer. |
| 1000 | */ |
| 1001 | if (qdf_unlikely(ast_entry->peer == peer) && |
| 1002 | (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) && |
Chaithanya Garrepalli | 6bc8263 | 2019-09-13 18:31:51 +0530 | [diff] [blame] | 1003 | (ast_entry->peer->vdev == peer->vdev) && |
syed touqeer pasha | 8a0928b | 2019-03-01 18:06:50 +0530 | [diff] [blame] | 1004 | (ast_entry->is_active)) |
| 1005 | return 0; |
| 1006 | |
Tallapragada Kalyan | 7a47aac | 2018-02-28 22:01:59 +0530 | [diff] [blame] | 1007 | old_peer = ast_entry->peer; |
| 1008 | TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem); |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 1009 | |
| 1010 | ast_entry->peer = peer; |
Tallapragada Kalyan | 7a47aac | 2018-02-28 22:01:59 +0530 | [diff] [blame] | 1011 | ast_entry->type = CDP_TXRX_AST_TYPE_WDS; |
| 1012 | ast_entry->pdev_id = peer->vdev->pdev->pdev_id; |
Tallapragada Kalyan | 7a47aac | 2018-02-28 22:01:59 +0530 | [diff] [blame] | 1013 | ast_entry->is_active = TRUE; |
| 1014 | TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem); |
| 1015 | |
Pamidipati, Vijay | d578db1 | 2018-04-09 23:03:12 +0530 | [diff] [blame] | 1016 | ret = soc->cdp_soc.ol_ops->peer_update_wds_entry( |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1017 | soc->ctrl_psoc, |
| 1018 | peer->vdev->vdev_id, |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 1019 | ast_entry->mac_addr.raw, |
| 1020 | peer->mac_addr.raw, |
Pamidipati, Vijay | d578db1 | 2018-04-09 23:03:12 +0530 | [diff] [blame] | 1021 | flags); |
Chaithanya Garrepalli | 4c7099f | 2018-03-23 12:20:18 +0530 | [diff] [blame] | 1022 | |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 1023 | return ret; |
| 1024 | } |
| 1025 | |
| 1026 | /* |
| 1027 | * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry |
| 1028 | * @soc: SoC handle |
| 1029 | * @ast_entry: AST entry of the node |
| 1030 | * |
| 1031 | * This function gets the pdev_id from the ast entry. |
| 1032 | * |
| 1033 | * Return: (uint8_t) pdev_id |
| 1034 | */ |
| 1035 | uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc, |
| 1036 | struct dp_ast_entry *ast_entry) |
| 1037 | { |
| 1038 | return ast_entry->pdev_id; |
| 1039 | } |
| 1040 | |
| 1041 | /* |
| 1042 | * dp_peer_ast_get_next_hop() - get next_hop from the ast entry |
| 1043 | * @soc: SoC handle |
| 1044 | * @ast_entry: AST entry of the node |
| 1045 | * |
| 1046 | * This function gets the next hop from the ast entry. |
| 1047 | * |
| 1048 | * Return: (uint8_t) next_hop |
| 1049 | */ |
| 1050 | uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, |
| 1051 | struct dp_ast_entry *ast_entry) |
| 1052 | { |
| 1053 | return ast_entry->next_hop; |
| 1054 | } |
| 1055 | |
| 1056 | /* |
| 1057 | * dp_peer_ast_set_type() - set type from the ast entry |
| 1058 | * @soc: SoC handle |
| 1059 | * @ast_entry: AST entry of the node |
| 1060 | * |
| 1061 | * This function sets the type in the ast entry. |
| 1062 | * |
| 1063 | * Return: |
| 1064 | */ |
| 1065 | void dp_peer_ast_set_type(struct dp_soc *soc, |
| 1066 | struct dp_ast_entry *ast_entry, |
| 1067 | enum cdp_txrx_ast_entry_type type) |
| 1068 | { |
| 1069 | ast_entry->type = type; |
| 1070 | } |
| 1071 | |
Manjunathappa Prakash | c850ec6 | 2017-11-13 16:55:50 -0800 | [diff] [blame] | 1072 | #else |
/* Stub for builds with AST support compiled out (the #if paired with
 * the enclosing #else): no AST entry is created.
 * NOTE(review): returns 1, whereas the real implementation returns 0 on
 * success — confirm callers only test for non-zero/zero, not sign.
 */
int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
		    uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		    uint32_t flags)
{
	return 1;
}
| 1079 | |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 1080 | void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry) |
Manjunathappa Prakash | c850ec6 | 2017-11-13 16:55:50 -0800 | [diff] [blame] | 1081 | { |
| 1082 | } |
| 1083 | |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 1084 | int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, |
| 1085 | struct dp_ast_entry *ast_entry, uint32_t flags) |
| 1086 | { |
| 1087 | return 1; |
| 1088 | } |
Manjunathappa Prakash | c850ec6 | 2017-11-13 16:55:50 -0800 | [diff] [blame] | 1089 | |
Chaithanya Garrepalli | cf347d1 | 2018-09-18 14:28:55 +0530 | [diff] [blame] | 1090 | struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc, |
| 1091 | uint8_t *ast_mac_addr) |
| 1092 | { |
| 1093 | return NULL; |
| 1094 | } |
| 1095 | |
/* Stub: no per-pdev AST hash exists when AST support is compiled out */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	return NULL;
}
| 1102 | |
/* Stub: report success so soc attach proceeds without an AST hash */
static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	return 0;
}
| 1107 | |
| 1108 | static inline void dp_peer_map_ast(struct dp_soc *soc, |
| 1109 | struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id, |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1110 | uint8_t vdev_id, uint16_t ast_hash) |
Manjunathappa Prakash | c850ec6 | 2017-11-13 16:55:50 -0800 | [diff] [blame] | 1111 | { |
| 1112 | return; |
| 1113 | } |
| 1114 | |
/* Stub: nothing to detach without an AST hash */
static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
}
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 1118 | |
/* Stub: AST entry type is not tracked when AST support is compiled out */
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
}
| 1124 | |
/* Stub: returns 0xff — presumably the "invalid pdev_id" sentinel, since
 * no pdev association exists without AST support. Confirm callers treat
 * 0xff as invalid.
 */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
| 1130 | |
Tallapragada Kalyan | 57b6bb3 | 2018-01-02 12:58:33 +0530 | [diff] [blame] | 1131 | uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, |
| 1132 | struct dp_ast_entry *ast_entry) |
| 1133 | { |
| 1134 | return 0xff; |
| 1135 | } |
Amir Patel | cb99026 | 2019-05-28 15:12:48 +0530 | [diff] [blame] | 1136 | |
/* NOTE(review): this appears to be a SECOND stub definition of
 * dp_peer_update_ast inside the same #else region (another stub with an
 * identical signature exists earlier in this file). If both are compiled
 * in one preprocessor branch this is a symbol redefinition — verify the
 * preprocessor nesting and drop the duplicate.
 */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	return 1;
}
| 1142 | |
Manjunathappa Prakash | c850ec6 | 2017-11-13 16:55:50 -0800 | [diff] [blame] | 1143 | #endif |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 1144 | |
Kiran Venkatappa | ed35f44 | 2018-07-19 22:22:29 +0530 | [diff] [blame] | 1145 | void dp_peer_ast_send_wds_del(struct dp_soc *soc, |
| 1146 | struct dp_ast_entry *ast_entry) |
| 1147 | { |
| 1148 | struct dp_peer *peer = ast_entry->peer; |
| 1149 | struct cdp_soc_t *cdp_soc = &soc->cdp_soc; |
| 1150 | |
Chaithanya Garrepalli | 9ff4c54 | 2019-01-07 23:03:09 +0530 | [diff] [blame] | 1151 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE, |
| 1152 | "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n", |
| 1153 | __func__, ast_entry->type, peer->vdev->pdev->pdev_id, |
| 1154 | peer->vdev->vdev_id, ast_entry->mac_addr.raw, |
| 1155 | ast_entry->next_hop, ast_entry->peer->mac_addr.raw); |
| 1156 | |
Chaithanya Garrepalli | 267ae0e | 2019-02-19 23:45:12 +0530 | [diff] [blame] | 1157 | if (ast_entry->next_hop) { |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1158 | cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc, |
| 1159 | peer->vdev->vdev_id, |
Chaithanya Garrepalli | 267ae0e | 2019-02-19 23:45:12 +0530 | [diff] [blame] | 1160 | ast_entry->mac_addr.raw, |
| 1161 | ast_entry->type); |
| 1162 | } |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1163 | |
Kiran Venkatappa | ed35f44 | 2018-07-19 22:22:29 +0530 | [diff] [blame] | 1164 | } |
| 1165 | |
Tallapragada Kalyan | 17254ed | 2019-06-14 18:13:51 +0530 | [diff] [blame] | 1166 | /** |
| 1167 | * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete |
| 1168 | * @soc: soc handle |
| 1169 | * @peer: peer handle |
| 1170 | * @mac_addr: mac address of the AST entry to searc and delete |
| 1171 | * |
| 1172 | * find the ast entry from the peer list using the mac address and free |
| 1173 | * the entry. |
| 1174 | * |
| 1175 | * Return: SUCCESS or NOENT |
| 1176 | */ |
| 1177 | static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc, |
| 1178 | struct dp_peer *peer, |
| 1179 | uint8_t *mac_addr) |
Kiran Venkatappa | ed35f44 | 2018-07-19 22:22:29 +0530 | [diff] [blame] | 1180 | { |
Tallapragada Kalyan | 17254ed | 2019-06-14 18:13:51 +0530 | [diff] [blame] | 1181 | struct dp_ast_entry *ast_entry; |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1182 | void *cookie = NULL; |
| 1183 | txrx_ast_free_cb cb = NULL; |
Kiran Venkatappa | ed35f44 | 2018-07-19 22:22:29 +0530 | [diff] [blame] | 1184 | |
Chaithanya Garrepalli | e10f87b | 2018-10-18 00:14:11 +0530 | [diff] [blame] | 1185 | /* |
| 1186 | * release the reference only if it is mapped |
| 1187 | * to ast_table |
| 1188 | */ |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1189 | |
| 1190 | qdf_spin_lock_bh(&soc->ast_lock); |
Tallapragada Kalyan | 17254ed | 2019-06-14 18:13:51 +0530 | [diff] [blame] | 1191 | |
| 1192 | ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr); |
| 1193 | if (!ast_entry) { |
| 1194 | qdf_spin_unlock_bh(&soc->ast_lock); |
| 1195 | return QDF_STATUS_E_NOENT; |
| 1196 | } else if (ast_entry->is_mapped) { |
Chaithanya Garrepalli | e10f87b | 2018-10-18 00:14:11 +0530 | [diff] [blame] | 1197 | soc->ast_table[ast_entry->ast_idx] = NULL; |
Tallapragada Kalyan | 17254ed | 2019-06-14 18:13:51 +0530 | [diff] [blame] | 1198 | } |
Tallapragada Kalyan | 887fb5d | 2018-10-24 18:27:58 +0530 | [diff] [blame] | 1199 | |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1200 | cb = ast_entry->callback; |
| 1201 | cookie = ast_entry->cookie; |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1202 | |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1203 | |
Radha Krishna Simha Jiguru | 64b4848 | 2019-12-23 17:09:41 +0530 | [diff] [blame] | 1204 | dp_peer_unlink_ast_entry(soc, ast_entry); |
| 1205 | dp_peer_free_ast_entry(soc, ast_entry); |
| 1206 | |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1207 | qdf_spin_unlock_bh(&soc->ast_lock); |
| 1208 | |
| 1209 | if (cb) { |
| 1210 | cb(soc->ctrl_psoc, |
Akshay Kosigi | a870c61 | 2019-07-08 23:10:30 +0530 | [diff] [blame] | 1211 | dp_soc_to_cdp_soc(soc), |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1212 | cookie, |
| 1213 | CDP_TXRX_AST_DELETED); |
| 1214 | } |
Tallapragada Kalyan | 17254ed | 2019-06-14 18:13:51 +0530 | [diff] [blame] | 1215 | |
| 1216 | return QDF_STATUS_SUCCESS; |
Kiran Venkatappa | ed35f44 | 2018-07-19 22:22:29 +0530 | [diff] [blame] | 1217 | } |
Kiran Venkatappa | ed35f44 | 2018-07-19 22:22:29 +0530 | [diff] [blame] | 1218 | |
Chaithanya Garrepalli | 0323f80 | 2018-03-14 17:45:21 +0530 | [diff] [blame] | 1219 | struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc, |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1220 | uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1221 | { |
| 1222 | union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; |
| 1223 | unsigned index; |
| 1224 | struct dp_peer *peer; |
| 1225 | |
| 1226 | if (mac_addr_is_aligned) { |
| 1227 | mac_addr = (union dp_align_mac_addr *) peer_mac_addr; |
| 1228 | } else { |
| 1229 | qdf_mem_copy( |
| 1230 | &local_mac_addr_aligned.raw[0], |
Srinivas Girigowda | 2751b6d | 2019-02-27 12:28:13 -0800 | [diff] [blame] | 1231 | peer_mac_addr, QDF_MAC_ADDR_SIZE); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1232 | mac_addr = &local_mac_addr_aligned; |
| 1233 | } |
| 1234 | index = dp_peer_find_hash_index(soc, mac_addr); |
| 1235 | qdf_spin_lock_bh(&soc->peer_ref_mutex); |
| 1236 | TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) { |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1237 | if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 && |
Pamidipati, Vijay | 3b0f916 | 2018-04-16 19:06:20 +0530 | [diff] [blame] | 1238 | ((peer->vdev->vdev_id == vdev_id) || |
| 1239 | (vdev_id == DP_VDEV_ALL))) { |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1240 | /* found it - increment the ref count before releasing |
| 1241 | * the lock |
| 1242 | */ |
| 1243 | qdf_atomic_inc(&peer->ref_cnt); |
| 1244 | qdf_spin_unlock_bh(&soc->peer_ref_mutex); |
| 1245 | return peer; |
| 1246 | } |
| 1247 | } |
| 1248 | qdf_spin_unlock_bh(&soc->peer_ref_mutex); |
| 1249 | return NULL; /* failure */ |
| 1250 | } |
| 1251 | |
| 1252 | void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer) |
| 1253 | { |
| 1254 | unsigned index; |
| 1255 | struct dp_peer *tmppeer = NULL; |
| 1256 | int found = 0; |
| 1257 | |
| 1258 | index = dp_peer_find_hash_index(soc, &peer->mac_addr); |
| 1259 | /* Check if tail is not empty before delete*/ |
| 1260 | QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index])); |
| 1261 | /* |
| 1262 | * DO NOT take the peer_ref_mutex lock here - it needs to be taken |
| 1263 | * by the caller. |
| 1264 | * The caller needs to hold the lock from the time the peer object's |
| 1265 | * reference count is decremented and tested up through the time the |
| 1266 | * reference to the peer object is removed from the hash table, by |
| 1267 | * this function. |
| 1268 | * Holding the lock only while removing the peer object reference |
| 1269 | * from the hash table keeps the hash table consistent, but does not |
| 1270 | * protect against a new HL tx context starting to use the peer object |
| 1271 | * if it looks up the peer object from its MAC address just after the |
| 1272 | * peer ref count is decremented to zero, but just before the peer |
| 1273 | * object reference is removed from the hash table. |
| 1274 | */ |
| 1275 | TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) { |
| 1276 | if (tmppeer == peer) { |
| 1277 | found = 1; |
| 1278 | break; |
| 1279 | } |
| 1280 | } |
| 1281 | QDF_ASSERT(found); |
| 1282 | TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem); |
| 1283 | } |
| 1284 | |
/**
 * dp_peer_find_hash_erase() - delete every peer remaining in the MAC
 *	hash table
 * @soc: soc handle
 *
 * Walks all hash bins and releases each remaining peer via
 * dp_peer_unref_delete(). Intended only for soc teardown, when no other
 * context can be using the peers.
 */
void dp_peer_find_hash_erase(struct dp_soc *soc)
{
	int i;

	/*
	 * Not really necessary to take peer_ref_mutex lock - by this point,
	 * it's known that the soc is no longer in use.
	 */
	for (i = 0; i <= soc->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
			struct dp_peer *peer, *peer_next;

			/*
			 * TAILQ_FOREACH_SAFE must be used here to avoid any
			 * memory access violation after peer is freed
			 */
			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
				hash_list_elem, peer_next) {
				/*
				 * Don't remove the peer from the hash table -
				 * that would modify the list we are currently
				 * traversing, and it's not necessary anyway.
				 */
				/*
				 * Artificially adjust the peer's ref count to
				 * 1, so it will get deleted by
				 * dp_peer_unref_delete.
				 */
				/* set to zero */
				qdf_atomic_init(&peer->ref_cnt);
				/* incr to one */
				qdf_atomic_inc(&peer->ref_cnt);
				dp_peer_unref_delete(peer);
			}
		}
	}
}
| 1322 | |
Tallapragada Kalyan | c741308 | 2019-03-07 21:22:10 +0530 | [diff] [blame] | 1323 | static void dp_peer_ast_table_detach(struct dp_soc *soc) |
| 1324 | { |
phadiman | b100750 | 2019-04-03 15:21:53 +0530 | [diff] [blame] | 1325 | if (soc->ast_table) { |
| 1326 | qdf_mem_free(soc->ast_table); |
| 1327 | soc->ast_table = NULL; |
| 1328 | } |
Tallapragada Kalyan | c741308 | 2019-03-07 21:22:10 +0530 | [diff] [blame] | 1329 | } |
| 1330 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1331 | static void dp_peer_find_map_detach(struct dp_soc *soc) |
| 1332 | { |
phadiman | b100750 | 2019-04-03 15:21:53 +0530 | [diff] [blame] | 1333 | if (soc->peer_id_to_obj_map) { |
| 1334 | qdf_mem_free(soc->peer_id_to_obj_map); |
| 1335 | soc->peer_id_to_obj_map = NULL; |
| 1336 | } |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1337 | } |
| 1338 | |
/**
 * dp_peer_find_attach() - allocate all peer lookup structures
 * @soc: soc handle
 *
 * Allocates the peer-id map, the peer MAC hash table, the AST index
 * table and the AST hash table. On any failure the structures already
 * allocated are torn down again, in reverse order.
 *
 * Return: 0 on success, 1 on failure
 */
int dp_peer_find_attach(struct dp_soc *soc)
{
	if (dp_peer_find_map_attach(soc))
		goto fail_map;

	if (dp_peer_find_hash_attach(soc))
		goto fail_hash;

	if (dp_peer_ast_table_attach(soc))
		goto fail_ast_table;

	if (dp_peer_ast_hash_attach(soc))
		goto fail_ast_hash;

	return 0; /* success */

fail_ast_hash:
	dp_peer_ast_table_detach(soc);
fail_ast_table:
	dp_peer_find_hash_detach(soc);
fail_hash:
	dp_peer_find_map_detach(soc);
fail_map:
	return 1;
}
| 1364 | |
Pratik Gandhi | 51b6b6d | 2017-09-18 15:02:43 +0530 | [diff] [blame] | 1365 | void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt, |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 1366 | union hal_reo_status *reo_status) |
| 1367 | { |
| 1368 | struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt; |
| 1369 | struct hal_reo_queue_status *queue_status = &(reo_status->queue_status); |
| 1370 | |
Debasis Das | 7a08136 | 2019-08-27 13:40:21 +0530 | [diff] [blame] | 1371 | if (queue_status->header.status == HAL_REO_CMD_DRAIN) |
| 1372 | return; |
| 1373 | |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 1374 | if (queue_status->header.status != HAL_REO_CMD_SUCCESS) { |
Venkata Sharath Chandra Manchala | c61826c | 2019-05-14 22:24:25 -0700 | [diff] [blame] | 1375 | DP_PRINT_STATS("REO stats failure %d for TID %d\n", |
| 1376 | queue_status->header.status, rx_tid->tid); |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 1377 | return; |
| 1378 | } |
| 1379 | |
Venkata Sharath Chandra Manchala | c61826c | 2019-05-14 22:24:25 -0700 | [diff] [blame] | 1380 | DP_PRINT_STATS("REO queue stats (TID: %d):\n" |
| 1381 | "ssn: %d\n" |
| 1382 | "curr_idx : %d\n" |
| 1383 | "pn_31_0 : %08x\n" |
| 1384 | "pn_63_32 : %08x\n" |
| 1385 | "pn_95_64 : %08x\n" |
| 1386 | "pn_127_96 : %08x\n" |
| 1387 | "last_rx_enq_tstamp : %08x\n" |
| 1388 | "last_rx_deq_tstamp : %08x\n" |
| 1389 | "rx_bitmap_31_0 : %08x\n" |
| 1390 | "rx_bitmap_63_32 : %08x\n" |
| 1391 | "rx_bitmap_95_64 : %08x\n" |
| 1392 | "rx_bitmap_127_96 : %08x\n" |
| 1393 | "rx_bitmap_159_128 : %08x\n" |
| 1394 | "rx_bitmap_191_160 : %08x\n" |
| 1395 | "rx_bitmap_223_192 : %08x\n" |
| 1396 | "rx_bitmap_255_224 : %08x\n", |
| 1397 | rx_tid->tid, |
| 1398 | queue_status->ssn, queue_status->curr_idx, |
| 1399 | queue_status->pn_31_0, queue_status->pn_63_32, |
| 1400 | queue_status->pn_95_64, queue_status->pn_127_96, |
| 1401 | queue_status->last_rx_enq_tstamp, |
| 1402 | queue_status->last_rx_deq_tstamp, |
| 1403 | queue_status->rx_bitmap_31_0, |
| 1404 | queue_status->rx_bitmap_63_32, |
| 1405 | queue_status->rx_bitmap_95_64, |
| 1406 | queue_status->rx_bitmap_127_96, |
| 1407 | queue_status->rx_bitmap_159_128, |
| 1408 | queue_status->rx_bitmap_191_160, |
| 1409 | queue_status->rx_bitmap_223_192, |
| 1410 | queue_status->rx_bitmap_255_224); |
Karunakar Dasineni | 3da0811 | 2017-06-15 14:42:39 -0700 | [diff] [blame] | 1411 | |
Venkata Sharath Chandra Manchala | c61826c | 2019-05-14 22:24:25 -0700 | [diff] [blame] | 1412 | DP_PRINT_STATS( |
| 1413 | "curr_mpdu_cnt : %d\n" |
| 1414 | "curr_msdu_cnt : %d\n" |
| 1415 | "fwd_timeout_cnt : %d\n" |
| 1416 | "fwd_bar_cnt : %d\n" |
| 1417 | "dup_cnt : %d\n" |
| 1418 | "frms_in_order_cnt : %d\n" |
| 1419 | "bar_rcvd_cnt : %d\n" |
| 1420 | "mpdu_frms_cnt : %d\n" |
| 1421 | "msdu_frms_cnt : %d\n" |
| 1422 | "total_byte_cnt : %d\n" |
| 1423 | "late_recv_mpdu_cnt : %d\n" |
| 1424 | "win_jump_2k : %d\n" |
| 1425 | "hole_cnt : %d\n", |
| 1426 | queue_status->curr_mpdu_cnt, |
| 1427 | queue_status->curr_msdu_cnt, |
| 1428 | queue_status->fwd_timeout_cnt, |
| 1429 | queue_status->fwd_bar_cnt, |
| 1430 | queue_status->dup_cnt, |
| 1431 | queue_status->frms_in_order_cnt, |
| 1432 | queue_status->bar_rcvd_cnt, |
| 1433 | queue_status->mpdu_frms_cnt, |
| 1434 | queue_status->msdu_frms_cnt, |
| 1435 | queue_status->total_cnt, |
| 1436 | queue_status->late_recv_mpdu_cnt, |
| 1437 | queue_status->win_jump_2k, |
| 1438 | queue_status->hole_cnt); |
sumedh baikady | e3947bd | 2017-11-29 19:19:25 -0800 | [diff] [blame] | 1439 | |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 1440 | DP_PRINT_STATS("Addba Req : %d\n" |
| 1441 | "Addba Resp : %d\n" |
| 1442 | "Addba Resp success : %d\n" |
| 1443 | "Addba Resp failed : %d\n" |
| 1444 | "Delba Req received : %d\n" |
| 1445 | "Delba Tx success : %d\n" |
| 1446 | "Delba Tx Fail : %d\n" |
| 1447 | "BA window size : %d\n" |
| 1448 | "Pn size : %d\n", |
| 1449 | rx_tid->num_of_addba_req, |
| 1450 | rx_tid->num_of_addba_resp, |
| 1451 | rx_tid->num_addba_rsp_success, |
| 1452 | rx_tid->num_addba_rsp_failed, |
| 1453 | rx_tid->num_of_delba_req, |
| 1454 | rx_tid->delba_tx_success_cnt, |
| 1455 | rx_tid->delba_tx_fail_cnt, |
| 1456 | rx_tid->ba_win_size, |
| 1457 | rx_tid->pn_size); |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 1458 | } |
| 1459 | |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 1460 | static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc, |
Tallapragada Kalyan | 6f6166e | 2017-02-17 17:00:23 +0530 | [diff] [blame] | 1461 | uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id, |
| 1462 | uint8_t vdev_id) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1463 | { |
| 1464 | struct dp_peer *peer; |
| 1465 | |
Chaithanya Garrepalli | 2f57279 | 2018-04-11 17:49:28 +0530 | [diff] [blame] | 1466 | QDF_ASSERT(peer_id <= soc->max_peers); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1467 | /* check if there's already a peer object with this MAC address */ |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1468 | peer = dp_peer_find_hash_find(soc, peer_mac_addr, |
| 1469 | 0 /* is aligned */, vdev_id); |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 1470 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
Shivani Soni | a5707a4 | 2020-01-08 16:42:08 +0530 | [diff] [blame] | 1471 | "%s: peer %pK ID %d vid %d mac %pM", |
| 1472 | __func__, peer, peer_id, vdev_id, peer_mac_addr); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1473 | |
| 1474 | if (peer) { |
| 1475 | /* peer's ref count was already incremented by |
| 1476 | * peer_find_hash_find |
| 1477 | */ |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 1478 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, |
Krishna Kumaar Natarajan | 71e5b83 | 2017-01-26 08:04:13 -0800 | [diff] [blame] | 1479 | "%s: ref_cnt: %d", __func__, |
| 1480 | qdf_atomic_read(&peer->ref_cnt)); |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1481 | if (!soc->peer_id_to_obj_map[peer_id]) |
| 1482 | soc->peer_id_to_obj_map[peer_id] = peer; |
| 1483 | else { |
| 1484 | /* Peer map event came for peer_id which |
| 1485 | * is already mapped, this is not expected |
| 1486 | */ |
| 1487 | QDF_ASSERT(0); |
| 1488 | } |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1489 | |
| 1490 | if (dp_peer_find_add_id_to_obj(peer, peer_id)) { |
| 1491 | /* TBDXXX: assert for now */ |
| 1492 | QDF_ASSERT(0); |
| 1493 | } |
| 1494 | |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 1495 | return peer; |
Tallapragada Kalyan | 6f6166e | 2017-02-17 17:00:23 +0530 | [diff] [blame] | 1496 | } |
| 1497 | |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 1498 | return NULL; |
Tallapragada Kalyan | 6f6166e | 2017-02-17 17:00:23 +0530 | [diff] [blame] | 1499 | } |
| 1500 | |
| 1501 | /** |
| 1502 | * dp_rx_peer_map_handler() - handle peer map event from firmware |
| 1503 | * @soc_handle - genereic soc handle |
| 1504 | * @peeri_id - peer_id from firmware |
| 1505 | * @hw_peer_id - ast index for this peer |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1506 | * @vdev_id - vdev ID |
| 1507 | * @peer_mac_addr - mac address of the peer |
| 1508 | * @ast_hash - ast hash value |
| 1509 | * @is_wds - flag to indicate peer map event for WDS ast entry |
Tallapragada Kalyan | 6f6166e | 2017-02-17 17:00:23 +0530 | [diff] [blame] | 1510 | * |
| 1511 | * associate the peer_id that firmware provided with peer entry |
| 1512 | * and update the ast table in the host with the hw_peer_id. |
| 1513 | * |
| 1514 | * Return: none |
| 1515 | */ |
| 1516 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1517 | void |
Akshay Kosigi | 8a75314 | 2019-06-27 14:17:08 +0530 | [diff] [blame] | 1518 | dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id, |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1519 | uint16_t hw_peer_id, uint8_t vdev_id, |
| 1520 | uint8_t *peer_mac_addr, uint16_t ast_hash, |
| 1521 | uint8_t is_wds) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1522 | { |
Tallapragada Kalyan | 6f6166e | 2017-02-17 17:00:23 +0530 | [diff] [blame] | 1523 | struct dp_peer *peer = NULL; |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1524 | enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC; |
Tallapragada Kalyan | 6f6166e | 2017-02-17 17:00:23 +0530 | [diff] [blame] | 1525 | |
Shivani Soni | a5707a4 | 2020-01-08 16:42:08 +0530 | [diff] [blame] | 1526 | dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %pM, vdev_id %d", |
| 1527 | soc, peer_id, hw_peer_id, |
| 1528 | peer_mac_addr, vdev_id); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1529 | |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1530 | /* Peer map event for WDS ast entry get the peer from |
| 1531 | * obj map |
Tallapragada Kalyan | 6f6166e | 2017-02-17 17:00:23 +0530 | [diff] [blame] | 1532 | */ |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1533 | if (is_wds) { |
| 1534 | peer = soc->peer_id_to_obj_map[peer_id]; |
Tallapragada Kalyan | 17254ed | 2019-06-14 18:13:51 +0530 | [diff] [blame] | 1535 | /* |
| 1536 | * In certain cases like Auth attack on a repeater |
| 1537 | * can result in the number of ast_entries falling |
| 1538 | * in the same hash bucket to exceed the max_skid |
| 1539 | * length supported by HW in root AP. In these cases |
| 1540 | * the FW will return the hw_peer_id (ast_index) as |
| 1541 | * 0xffff indicating HW could not add the entry in |
| 1542 | * its table. Host has to delete the entry from its |
| 1543 | * table in these cases. |
| 1544 | */ |
| 1545 | if (hw_peer_id == HTT_INVALID_PEER) { |
| 1546 | DP_STATS_INC(soc, ast.map_err, 1); |
| 1547 | if (!dp_peer_ast_free_entry_by_mac(soc, |
| 1548 | peer, |
| 1549 | peer_mac_addr)) |
| 1550 | return; |
| 1551 | |
| 1552 | dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u", |
| 1553 | peer, peer->peer_ids[0], |
| 1554 | peer->mac_addr.raw, peer_mac_addr, vdev_id, |
| 1555 | is_wds); |
| 1556 | |
| 1557 | return; |
| 1558 | } |
| 1559 | |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1560 | } else { |
Tallapragada Kalyan | 17254ed | 2019-06-14 18:13:51 +0530 | [diff] [blame] | 1561 | /* |
| 1562 | * It's the responsibility of the CP and FW to ensure |
| 1563 | * that peer is created successfully. Ideally DP should |
| 1564 | * not hit the below condition for directly assocaited |
| 1565 | * peers. |
| 1566 | */ |
| 1567 | if ((hw_peer_id < 0) || |
| 1568 | (hw_peer_id >= |
| 1569 | wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) { |
| 1570 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 1571 | "invalid hw_peer_id: %d", hw_peer_id); |
| 1572 | qdf_assert_always(0); |
| 1573 | } |
| 1574 | |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 1575 | peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id, |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1576 | hw_peer_id, vdev_id); |
Bharat Kumar M | 9a5d537 | 2017-05-08 17:41:42 +0530 | [diff] [blame] | 1577 | |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1578 | if (peer) { |
Chaitanya Kiran Godavarthi | 70aeda1 | 2019-02-01 17:32:48 +0530 | [diff] [blame] | 1579 | if (wlan_op_mode_sta == peer->vdev->opmode && |
| 1580 | qdf_mem_cmp(peer->mac_addr.raw, |
| 1581 | peer->vdev->mac_addr.raw, |
| 1582 | QDF_MAC_ADDR_SIZE) != 0) { |
| 1583 | dp_info("STA vdev bss_peer!!!!"); |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1584 | peer->bss_peer = 1; |
| 1585 | peer->vdev->vap_bss_peer = peer; |
Venkata Sharath Chandra Manchala | a12702b | 2020-01-17 14:46:19 -0800 | [diff] [blame] | 1586 | qdf_mem_copy(peer->vdev->vap_bss_peer_mac_addr, |
| 1587 | peer->mac_addr.raw, |
| 1588 | QDF_MAC_ADDR_SIZE); |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1589 | } |
| 1590 | |
Subhranil Choudhury | 5985716 | 2019-09-19 13:33:13 +0530 | [diff] [blame] | 1591 | if (peer->vdev->opmode == wlan_op_mode_sta) { |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1592 | peer->vdev->bss_ast_hash = ast_hash; |
Subhranil Choudhury | 5985716 | 2019-09-19 13:33:13 +0530 | [diff] [blame] | 1593 | peer->vdev->bss_ast_idx = hw_peer_id; |
| 1594 | } |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1595 | |
| 1596 | /* Add ast entry incase self ast entry is |
| 1597 | * deleted due to DP CP sync issue |
| 1598 | * |
| 1599 | * self_ast_entry is modified in peer create |
| 1600 | * and peer unmap path which cannot run in |
| 1601 | * parllel with peer map, no lock need before |
| 1602 | * referring it |
| 1603 | */ |
| 1604 | if (!peer->self_ast_entry) { |
Mohit Khanna | 0255314 | 2019-04-11 17:49:27 -0700 | [diff] [blame] | 1605 | dp_info("Add self ast from map %pM", |
| 1606 | peer_mac_addr); |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1607 | dp_peer_add_ast(soc, peer, |
| 1608 | peer_mac_addr, |
| 1609 | type, 0); |
| 1610 | } |
| 1611 | |
sumedh baikady | 68450ab | 2018-03-23 18:36:29 -0700 | [diff] [blame] | 1612 | } |
Anish Nataraj | 0dae676 | 2018-03-02 22:31:45 +0530 | [diff] [blame] | 1613 | } |
Pamidipati, Vijay | b8bbf16 | 2017-06-26 23:47:39 +0530 | [diff] [blame] | 1614 | dp_peer_map_ast(soc, peer, peer_mac_addr, |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1615 | hw_peer_id, vdev_id, ast_hash); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1616 | } |
| 1617 | |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1618 | /** |
| 1619 | * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware |
| 1620 | * @soc_handle - genereic soc handle |
| 1621 | * @peeri_id - peer_id from firmware |
| 1622 | * @vdev_id - vdev ID |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1623 | * @mac_addr - mac address of the peer or wds entry |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1624 | * @is_wds - flag to indicate peer map event for WDS ast entry |
| 1625 | * |
| 1626 | * Return: none |
| 1627 | */ |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1628 | void |
Akshay Kosigi | 8a75314 | 2019-06-27 14:17:08 +0530 | [diff] [blame] | 1629 | dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id, |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1630 | uint8_t vdev_id, uint8_t *mac_addr, |
Chaithanya Garrepalli | 3e93e5f | 2018-09-12 17:02:31 +0530 | [diff] [blame] | 1631 | uint8_t is_wds) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1632 | { |
| 1633 | struct dp_peer *peer; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1634 | uint8_t i; |
Chaithanya Garrepalli | 974da26 | 2018-02-22 20:32:19 +0530 | [diff] [blame] | 1635 | |
| 1636 | peer = __dp_peer_find_by_id(soc, peer_id); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1637 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1638 | /* |
| 1639 | * Currently peer IDs are assigned for vdevs as well as peers. |
| 1640 | * If the peer ID is for a vdev, then the peer pointer stored |
| 1641 | * in peer_id_to_obj_map will be NULL. |
| 1642 | */ |
Chaithanya Garrepalli | 974da26 | 2018-02-22 20:32:19 +0530 | [diff] [blame] | 1643 | if (!peer) { |
Mohit Khanna | 0255314 | 2019-04-11 17:49:27 -0700 | [diff] [blame] | 1644 | dp_err("Received unmap event for invalid peer_id %u", peer_id); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1645 | return; |
Chaithanya Garrepalli | 974da26 | 2018-02-22 20:32:19 +0530 | [diff] [blame] | 1646 | } |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1647 | |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1648 | /* If V2 Peer map messages are enabled AST entry has to be freed here |
| 1649 | */ |
Radha Krishna Simha Jiguru | 64b4848 | 2019-12-23 17:09:41 +0530 | [diff] [blame] | 1650 | if (is_wds) { |
Tallapragada Kalyan | 17254ed | 2019-06-14 18:13:51 +0530 | [diff] [blame] | 1651 | if (!dp_peer_ast_free_entry_by_mac(soc, peer, mac_addr)) |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1652 | return; |
Pavankumar Nandeshwar | 1ab908e | 2019-01-24 12:53:13 +0530 | [diff] [blame] | 1653 | |
Mohit Khanna | 0255314 | 2019-04-11 17:49:27 -0700 | [diff] [blame] | 1654 | dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u", |
| 1655 | peer, peer->peer_ids[0], |
| 1656 | peer->mac_addr.raw, mac_addr, vdev_id, |
| 1657 | is_wds); |
Pavankumar Nandeshwar | 1ab908e | 2019-01-24 12:53:13 +0530 | [diff] [blame] | 1658 | |
| 1659 | return; |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1660 | } |
| 1661 | |
Mohit Khanna | 0255314 | 2019-04-11 17:49:27 -0700 | [diff] [blame] | 1662 | dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK", |
Chaithanya Garrepalli | 9cc562c | 2018-11-16 18:30:41 +0530 | [diff] [blame] | 1663 | soc, peer_id, peer); |
| 1664 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1665 | soc->peer_id_to_obj_map[peer_id] = NULL; |
| 1666 | for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) { |
| 1667 | if (peer->peer_ids[i] == peer_id) { |
| 1668 | peer->peer_ids[i] = HTT_INVALID_PEER; |
| 1669 | break; |
| 1670 | } |
| 1671 | } |
| 1672 | |
Mainak Sen | d13ed3e | 2019-12-24 14:52:01 +0530 | [diff] [blame] | 1673 | /* |
| 1674 | * Reset ast flow mapping table |
| 1675 | */ |
| 1676 | dp_peer_reset_flowq_map(peer); |
| 1677 | |
Bharat Kumar M | 9a5d537 | 2017-05-08 17:41:42 +0530 | [diff] [blame] | 1678 | if (soc->cdp_soc.ol_ops->peer_unmap_event) { |
Sathyanarayanan Esakkiappan | 38c6f98 | 2017-12-05 12:00:31 +0530 | [diff] [blame] | 1679 | soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc, |
Subhranil Choudhury | 9bcfecf | 2019-02-28 13:41:45 +0530 | [diff] [blame] | 1680 | peer_id, vdev_id); |
Bharat Kumar M | 9a5d537 | 2017-05-08 17:41:42 +0530 | [diff] [blame] | 1681 | } |
| 1682 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1683 | /* |
| 1684 | * Remove a reference to the peer. |
| 1685 | * If there are no more references, delete the peer object. |
| 1686 | */ |
| 1687 | dp_peer_unref_delete(peer); |
| 1688 | } |
| 1689 | |
/**
 * dp_peer_find_detach() - tear down all peer lookup structures
 * @soc: soc handle
 *
 * Inverse of dp_peer_find_attach(): frees the peer-id map, the peer
 * MAC hash table, the AST hash table and the AST index table.
 */
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_ast_table_detach(soc);
}
| 1698 | |
Karunakar Dasineni | a0f09ea | 2016-11-21 17:41:31 -0800 | [diff] [blame] | 1699 | static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt, |
| 1700 | union hal_reo_status *reo_status) |
| 1701 | { |
| 1702 | struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt; |
Karunakar Dasineni | a0f09ea | 2016-11-21 17:41:31 -0800 | [diff] [blame] | 1703 | |
Karunakar Dasineni | 31b98d4 | 2018-02-27 23:05:08 -0800 | [diff] [blame] | 1704 | if ((reo_status->rx_queue_status.header.status != |
| 1705 | HAL_REO_CMD_SUCCESS) && |
| 1706 | (reo_status->rx_queue_status.header.status != |
| 1707 | HAL_REO_CMD_DRAIN)) { |
Karunakar Dasineni | a0f09ea | 2016-11-21 17:41:31 -0800 | [diff] [blame] | 1708 | /* Should not happen normally. Just print error for now */ |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 1709 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 1710 | "%s: Rx tid HW desc update failed(%d): tid %d", |
| 1711 | __func__, |
| 1712 | reo_status->rx_queue_status.header.status, |
| 1713 | rx_tid->tid); |
Karunakar Dasineni | a0f09ea | 2016-11-21 17:41:31 -0800 | [diff] [blame] | 1714 | } |
Karunakar Dasineni | a0f09ea | 2016-11-21 17:41:31 -0800 | [diff] [blame] | 1715 | } |
| 1716 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1717 | /* |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 1718 | * dp_find_peer_by_addr - find peer instance by mac address |
| 1719 | * @dev: physical device instance |
| 1720 | * @peer_mac_addr: peer mac address |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 1721 | * |
| 1722 | * Return: peer instance pointer |
| 1723 | */ |
Yeshwanth Sriram Guntuka | 65d5477 | 2019-11-22 14:50:02 +0530 | [diff] [blame] | 1724 | void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr) |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 1725 | { |
Venkata Sharath Chandra Manchala | f2a125a | 2016-11-28 18:10:11 -0800 | [diff] [blame] | 1726 | struct dp_pdev *pdev = (struct dp_pdev *)dev; |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 1727 | struct dp_peer *peer; |
| 1728 | |
Pamidipati, Vijay | 3b0f916 | 2018-04-16 19:06:20 +0530 | [diff] [blame] | 1729 | peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL); |
Chaithanya Garrepalli | 0323f80 | 2018-03-14 17:45:21 +0530 | [diff] [blame] | 1730 | |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 1731 | if (!peer) |
| 1732 | return NULL; |
| 1733 | |
Yeshwanth Sriram Guntuka | 65d5477 | 2019-11-22 14:50:02 +0530 | [diff] [blame] | 1734 | dp_verbose_debug("peer %pK mac: %pM", peer, |
| 1735 | peer->mac_addr.raw); |
Krishna Kumaar Natarajan | 71e5b83 | 2017-01-26 08:04:13 -0800 | [diff] [blame] | 1736 | |
| 1737 | /* ref_cnt is incremented inside dp_peer_find_hash_find(). |
| 1738 | * Decrement it here. |
| 1739 | */ |
Chaithanya Garrepalli | 7c8cf12 | 2018-09-07 19:23:52 +0530 | [diff] [blame] | 1740 | dp_peer_unref_delete(peer); |
Krishna Kumaar Natarajan | 71e5b83 | 2017-01-26 08:04:13 -0800 | [diff] [blame] | 1741 | |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 1742 | return peer; |
| 1743 | } |
| 1744 | |
Varun Reddy Yeturu | 8119e12 | 2019-07-27 14:14:02 -0700 | [diff] [blame] | 1745 | static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer) |
| 1746 | { |
| 1747 | struct ol_if_ops *ol_ops = NULL; |
| 1748 | bool is_roaming = false; |
| 1749 | uint8_t vdev_id = -1; |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 1750 | struct cdp_soc_t *soc; |
Varun Reddy Yeturu | 8119e12 | 2019-07-27 14:14:02 -0700 | [diff] [blame] | 1751 | |
| 1752 | if (!peer) { |
| 1753 | dp_info("Peer is NULL. No roaming possible"); |
| 1754 | return false; |
| 1755 | } |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 1756 | |
| 1757 | soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc); |
Varun Reddy Yeturu | 8119e12 | 2019-07-27 14:14:02 -0700 | [diff] [blame] | 1758 | ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops; |
| 1759 | |
| 1760 | if (ol_ops && ol_ops->is_roam_inprogress) { |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 1761 | dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id); |
Varun Reddy Yeturu | 8119e12 | 2019-07-27 14:14:02 -0700 | [diff] [blame] | 1762 | is_roaming = ol_ops->is_roam_inprogress(vdev_id); |
| 1763 | } |
| 1764 | |
| 1765 | dp_info("peer: %pM, vdev_id: %d, is_roaming: %d", |
| 1766 | peer->mac_addr.raw, vdev_id, is_roaming); |
| 1767 | |
| 1768 | return is_roaming; |
| 1769 | } |
| 1770 | |
Mohit Khanna | 82382b3 | 2019-12-09 19:15:27 -0800 | [diff] [blame] | 1771 | QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t |
Varun Reddy Yeturu | 8119e12 | 2019-07-27 14:14:02 -0700 | [diff] [blame] | 1772 | ba_window_size, uint32_t start_seq) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1773 | { |
Karunakar Dasineni | a0f09ea | 2016-11-21 17:41:31 -0800 | [diff] [blame] | 1774 | struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; |
| 1775 | struct dp_soc *soc = peer->vdev->pdev->soc; |
| 1776 | struct hal_reo_cmd_params params; |
| 1777 | |
| 1778 | qdf_mem_zero(¶ms, sizeof(params)); |
| 1779 | |
| 1780 | params.std.need_status = 1; |
| 1781 | params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff; |
| 1782 | params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; |
| 1783 | params.u.upd_queue_params.update_ba_window_size = 1; |
| 1784 | params.u.upd_queue_params.ba_window_size = ba_window_size; |
| 1785 | |
| 1786 | if (start_seq < IEEE80211_SEQ_MAX) { |
| 1787 | params.u.upd_queue_params.update_ssn = 1; |
| 1788 | params.u.upd_queue_params.ssn = start_seq; |
sumedh baikady | c0bd0be | 2019-08-08 17:52:24 -0700 | [diff] [blame] | 1789 | } else { |
| 1790 | dp_set_ssn_valid_flag(¶ms, 0); |
Karunakar Dasineni | a0f09ea | 2016-11-21 17:41:31 -0800 | [diff] [blame] | 1791 | } |
Rakesh Pillai | ae0f601 | 2020-01-02 11:03:09 +0530 | [diff] [blame] | 1792 | |
| 1793 | if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, ¶ms, |
| 1794 | dp_rx_tid_update_cb, rx_tid)) { |
| 1795 | dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE"); |
| 1796 | DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); |
| 1797 | } |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 1798 | |
| 1799 | rx_tid->ba_win_size = ba_window_size; |
Gyanranjan Hazarika | 7f9c050 | 2018-07-25 23:26:16 -0700 | [diff] [blame] | 1800 | |
Varun Reddy Yeturu | 8119e12 | 2019-07-27 14:14:02 -0700 | [diff] [blame] | 1801 | if (dp_get_peer_vdev_roaming_in_progress(peer)) |
| 1802 | return QDF_STATUS_E_PERM; |
| 1803 | |
| 1804 | if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) |
Rakesh Pillai | 9498cd7 | 2019-04-05 18:43:47 +0530 | [diff] [blame] | 1805 | soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup( |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1806 | soc->ctrl_psoc, peer->vdev->pdev->pdev_id, |
Rakesh Pillai | 9498cd7 | 2019-04-05 18:43:47 +0530 | [diff] [blame] | 1807 | peer->vdev->vdev_id, peer->mac_addr.raw, |
| 1808 | rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size); |
Sravan Kumar Kairam | 4f6b8f5 | 2019-03-18 14:53:06 +0530 | [diff] [blame] | 1809 | |
Varun Reddy Yeturu | 8119e12 | 2019-07-27 14:14:02 -0700 | [diff] [blame] | 1810 | return QDF_STATUS_SUCCESS; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1811 | } |
| 1812 | |
| 1813 | /* |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 1814 | * dp_reo_desc_free() - Callback free reo descriptor memory after |
| 1815 | * HW cache flush |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 1816 | * |
| 1817 | * @soc: DP SOC handle |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 1818 | * @cb_ctxt: Callback context |
| 1819 | * @reo_status: REO command status |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 1820 | */ |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 1821 | static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt, |
| 1822 | union hal_reo_status *reo_status) |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 1823 | { |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 1824 | struct reo_desc_list_node *freedesc = |
| 1825 | (struct reo_desc_list_node *)cb_ctxt; |
| 1826 | struct dp_rx_tid *rx_tid = &freedesc->rx_tid; |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 1827 | |
Karunakar Dasineni | 31b98d4 | 2018-02-27 23:05:08 -0800 | [diff] [blame] | 1828 | if ((reo_status->fl_cache_status.header.status != |
| 1829 | HAL_REO_CMD_SUCCESS) && |
| 1830 | (reo_status->fl_cache_status.header.status != |
| 1831 | HAL_REO_CMD_DRAIN)) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 1832 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 1833 | "%s: Rx tid HW desc flush failed(%d): tid %d", |
| 1834 | __func__, |
| 1835 | reo_status->rx_queue_status.header.status, |
| 1836 | freedesc->rx_tid.tid); |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 1837 | } |
chenguo | 8df4d46 | 2018-12-19 16:33:14 +0800 | [diff] [blame] | 1838 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, |
| 1839 | "%s: hw_qdesc_paddr: %pK, tid:%d", __func__, |
| 1840 | (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid); |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 1841 | qdf_mem_unmap_nbytes_single(soc->osdev, |
| 1842 | rx_tid->hw_qdesc_paddr, |
| 1843 | QDF_DMA_BIDIRECTIONAL, |
| 1844 | rx_tid->hw_qdesc_alloc_size); |
| 1845 | qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); |
| 1846 | qdf_mem_free(freedesc); |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 1847 | } |
| 1848 | |
Nandha Kishore Easwaran | b7c1884 | 2019-12-24 10:36:37 +0530 | [diff] [blame] | 1849 | #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) |
Karunakar Dasineni | 7957fa9 | 2017-02-23 23:05:40 -0800 | [diff] [blame] | 1850 | /* Hawkeye emulation requires bus address to be >= 0x50000000 */ |
| 1851 | static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr) |
| 1852 | { |
| 1853 | if (dma_addr < 0x50000000) |
| 1854 | return QDF_STATUS_E_FAILURE; |
| 1855 | else |
| 1856 | return QDF_STATUS_SUCCESS; |
| 1857 | } |
| 1858 | #else |
| 1859 | static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr) |
| 1860 | { |
| 1861 | return QDF_STATUS_SUCCESS; |
| 1862 | } |
| 1863 | #endif |
| 1864 | |
| 1865 | |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 1866 | /* |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1867 | * dp_rx_tid_setup_wifi3() – Setup receive TID state |
| 1868 | * @peer: Datapath peer handle |
| 1869 | * @tid: TID |
| 1870 | * @ba_window_size: BlockAck window size |
| 1871 | * @start_seq: Starting sequence number |
| 1872 | * |
Varun Reddy Yeturu | 8119e12 | 2019-07-27 14:14:02 -0700 | [diff] [blame] | 1873 | * Return: QDF_STATUS code |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1874 | */ |
Varun Reddy Yeturu | 8119e12 | 2019-07-27 14:14:02 -0700 | [diff] [blame] | 1875 | QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid, |
| 1876 | uint32_t ba_window_size, uint32_t start_seq) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1877 | { |
| 1878 | struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; |
| 1879 | struct dp_vdev *vdev = peer->vdev; |
| 1880 | struct dp_soc *soc = vdev->pdev->soc; |
| 1881 | uint32_t hw_qdesc_size; |
| 1882 | uint32_t hw_qdesc_align; |
| 1883 | int hal_pn_type; |
| 1884 | void *hw_qdesc_vaddr; |
Karunakar Dasineni | 7957fa9 | 2017-02-23 23:05:40 -0800 | [diff] [blame] | 1885 | uint32_t alloc_tries = 0; |
Varun Reddy Yeturu | 8119e12 | 2019-07-27 14:14:02 -0700 | [diff] [blame] | 1886 | QDF_STATUS err = QDF_STATUS_SUCCESS; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1887 | |
Tallapragada Kalyan | 8c93d5d | 2018-05-28 05:02:53 +0530 | [diff] [blame] | 1888 | if (peer->delete_in_progress || |
| 1889 | !qdf_atomic_read(&peer->is_default_route_set)) |
Karunakar Dasineni | 372647d | 2018-01-15 22:27:39 -0800 | [diff] [blame] | 1890 | return QDF_STATUS_E_FAILURE; |
| 1891 | |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 1892 | rx_tid->ba_win_size = ba_window_size; |
Jeff Johnson | a8edf33 | 2019-03-18 09:51:52 -0700 | [diff] [blame] | 1893 | if (rx_tid->hw_qdesc_vaddr_unaligned) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1894 | return dp_rx_tid_update_wifi3(peer, tid, ba_window_size, |
| 1895 | start_seq); |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 1896 | rx_tid->delba_tx_status = 0; |
| 1897 | rx_tid->ppdu_id_2k = 0; |
sumedh baikady | e3947bd | 2017-11-29 19:19:25 -0800 | [diff] [blame] | 1898 | rx_tid->num_of_addba_req = 0; |
| 1899 | rx_tid->num_of_delba_req = 0; |
| 1900 | rx_tid->num_of_addba_resp = 0; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 1901 | rx_tid->num_addba_rsp_failed = 0; |
| 1902 | rx_tid->num_addba_rsp_success = 0; |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 1903 | rx_tid->delba_tx_success_cnt = 0; |
| 1904 | rx_tid->delba_tx_fail_cnt = 0; |
| 1905 | rx_tid->statuscode = 0; |
Karunakar Dasineni | 26ebbe4 | 2018-05-31 07:59:10 -0700 | [diff] [blame] | 1906 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1907 | /* TODO: Allocating HW queue descriptors based on max BA window size |
| 1908 | * for all QOS TIDs so that same descriptor can be used later when |
| 1909 | * ADDBA request is recevied. This should be changed to allocate HW |
| 1910 | * queue descriptors based on BA window size being negotiated (0 for |
| 1911 | * non BA cases), and reallocate when BA window size changes and also |
| 1912 | * send WMI message to FW to change the REO queue descriptor in Rx |
| 1913 | * peer entry as part of dp_rx_tid_update. |
| 1914 | */ |
| 1915 | if (tid != DP_NON_QOS_TID) |
| 1916 | hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, |
Karunakar Dasineni | 26ebbe4 | 2018-05-31 07:59:10 -0700 | [diff] [blame] | 1917 | HAL_RX_MAX_BA_WINDOW, tid); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1918 | else |
| 1919 | hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, |
Karunakar Dasineni | 26ebbe4 | 2018-05-31 07:59:10 -0700 | [diff] [blame] | 1920 | ba_window_size, tid); |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 1921 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1922 | hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc); |
| 1923 | /* To avoid unnecessary extra allocation for alignment, try allocating |
| 1924 | * exact size and see if we already have aligned address. |
| 1925 | */ |
| 1926 | rx_tid->hw_qdesc_alloc_size = hw_qdesc_size; |
Karunakar Dasineni | 7957fa9 | 2017-02-23 23:05:40 -0800 | [diff] [blame] | 1927 | |
| 1928 | try_desc_alloc: |
| 1929 | rx_tid->hw_qdesc_vaddr_unaligned = |
| 1930 | qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1931 | |
| 1932 | if (!rx_tid->hw_qdesc_vaddr_unaligned) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 1933 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 1934 | "%s: Rx tid HW desc alloc failed: tid %d", |
| 1935 | __func__, tid); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1936 | return QDF_STATUS_E_NOMEM; |
| 1937 | } |
| 1938 | |
| 1939 | if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) % |
| 1940 | hw_qdesc_align) { |
| 1941 | /* Address allocated above is not alinged. Allocate extra |
| 1942 | * memory for alignment |
| 1943 | */ |
Karunakar Dasineni | 7957fa9 | 2017-02-23 23:05:40 -0800 | [diff] [blame] | 1944 | qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); |
Karunakar Dasineni | 7957fa9 | 2017-02-23 23:05:40 -0800 | [diff] [blame] | 1945 | rx_tid->hw_qdesc_vaddr_unaligned = |
Pramod Simha | 6b23f75 | 2017-03-30 11:54:18 -0700 | [diff] [blame] | 1946 | qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size + |
| 1947 | hw_qdesc_align - 1); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1948 | |
| 1949 | if (!rx_tid->hw_qdesc_vaddr_unaligned) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 1950 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 1951 | "%s: Rx tid HW desc alloc failed: tid %d", |
| 1952 | __func__, tid); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1953 | return QDF_STATUS_E_NOMEM; |
| 1954 | } |
| 1955 | |
Pramod Simha | 6b23f75 | 2017-03-30 11:54:18 -0700 | [diff] [blame] | 1956 | hw_qdesc_vaddr = (void *)qdf_align((unsigned long) |
| 1957 | rx_tid->hw_qdesc_vaddr_unaligned, |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1958 | hw_qdesc_align); |
Pramod Simha | 6b23f75 | 2017-03-30 11:54:18 -0700 | [diff] [blame] | 1959 | |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 1960 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
| 1961 | "%s: Total Size %d Aligned Addr %pK", |
| 1962 | __func__, rx_tid->hw_qdesc_alloc_size, |
| 1963 | hw_qdesc_vaddr); |
Pramod Simha | 6b23f75 | 2017-03-30 11:54:18 -0700 | [diff] [blame] | 1964 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1965 | } else { |
| 1966 | hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1967 | } |
| 1968 | |
| 1969 | /* TODO: Ensure that sec_type is set before ADDBA is received. |
| 1970 | * Currently this is set based on htt indication |
| 1971 | * HTT_T2H_MSG_TYPE_SEC_IND from target |
| 1972 | */ |
| 1973 | switch (peer->security[dp_sec_ucast].sec_type) { |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 1974 | case cdp_sec_type_tkip_nomic: |
| 1975 | case cdp_sec_type_aes_ccmp: |
| 1976 | case cdp_sec_type_aes_ccmp_256: |
| 1977 | case cdp_sec_type_aes_gcmp: |
| 1978 | case cdp_sec_type_aes_gcmp_256: |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1979 | hal_pn_type = HAL_PN_WPA; |
| 1980 | break; |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 1981 | case cdp_sec_type_wapi: |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 1982 | if (vdev->opmode == wlan_op_mode_ap) |
| 1983 | hal_pn_type = HAL_PN_WAPI_EVEN; |
| 1984 | else |
| 1985 | hal_pn_type = HAL_PN_WAPI_UNEVEN; |
| 1986 | break; |
| 1987 | default: |
| 1988 | hal_pn_type = HAL_PN_NONE; |
| 1989 | break; |
| 1990 | } |
| 1991 | |
| 1992 | hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq, |
| 1993 | hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type); |
| 1994 | |
Pramod Simha | 6b23f75 | 2017-03-30 11:54:18 -0700 | [diff] [blame] | 1995 | qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr, |
Karunakar Dasineni | 7957fa9 | 2017-02-23 23:05:40 -0800 | [diff] [blame] | 1996 | QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size, |
Pramod Simha | 6b23f75 | 2017-03-30 11:54:18 -0700 | [diff] [blame] | 1997 | &(rx_tid->hw_qdesc_paddr)); |
Karunakar Dasineni | 7957fa9 | 2017-02-23 23:05:40 -0800 | [diff] [blame] | 1998 | |
Pramod Simha | 6b23f75 | 2017-03-30 11:54:18 -0700 | [diff] [blame] | 1999 | if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) != |
Karunakar Dasineni | 7957fa9 | 2017-02-23 23:05:40 -0800 | [diff] [blame] | 2000 | QDF_STATUS_SUCCESS) { |
nobelj | fdfe7ea | 2018-06-19 18:08:25 -0700 | [diff] [blame] | 2001 | if (alloc_tries++ < 10) { |
| 2002 | qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); |
| 2003 | rx_tid->hw_qdesc_vaddr_unaligned = NULL; |
Karunakar Dasineni | 7957fa9 | 2017-02-23 23:05:40 -0800 | [diff] [blame] | 2004 | goto try_desc_alloc; |
nobelj | fdfe7ea | 2018-06-19 18:08:25 -0700 | [diff] [blame] | 2005 | } else { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 2006 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 2007 | "%s: Rx tid HW desc alloc failed (lowmem): tid %d", |
| 2008 | __func__, tid); |
nobelj | fdfe7ea | 2018-06-19 18:08:25 -0700 | [diff] [blame] | 2009 | err = QDF_STATUS_E_NOMEM; |
| 2010 | goto error; |
Karunakar Dasineni | 7957fa9 | 2017-02-23 23:05:40 -0800 | [diff] [blame] | 2011 | } |
| 2012 | } |
| 2013 | |
Varun Reddy Yeturu | 8119e12 | 2019-07-27 14:14:02 -0700 | [diff] [blame] | 2014 | if (dp_get_peer_vdev_roaming_in_progress(peer)) { |
| 2015 | err = QDF_STATUS_E_PERM; |
| 2016 | goto error; |
| 2017 | } |
| 2018 | |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 2019 | if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) { |
nobelj | fdfe7ea | 2018-06-19 18:08:25 -0700 | [diff] [blame] | 2020 | if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup( |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 2021 | soc->ctrl_psoc, |
| 2022 | peer->vdev->pdev->pdev_id, |
| 2023 | peer->vdev->vdev_id, |
nobelj | fdfe7ea | 2018-06-19 18:08:25 -0700 | [diff] [blame] | 2024 | peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid, |
| 2025 | 1, ba_window_size)) { |
| 2026 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 2027 | "%s: Failed to send reo queue setup to FW - tid %d\n", |
| 2028 | __func__, tid); |
| 2029 | err = QDF_STATUS_E_FAILURE; |
| 2030 | goto error; |
| 2031 | } |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2032 | } |
| 2033 | return 0; |
nobelj | fdfe7ea | 2018-06-19 18:08:25 -0700 | [diff] [blame] | 2034 | error: |
Jeff Johnson | a8edf33 | 2019-03-18 09:51:52 -0700 | [diff] [blame] | 2035 | if (rx_tid->hw_qdesc_vaddr_unaligned) { |
nobelj | fdfe7ea | 2018-06-19 18:08:25 -0700 | [diff] [blame] | 2036 | if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) == |
| 2037 | QDF_STATUS_SUCCESS) |
| 2038 | qdf_mem_unmap_nbytes_single( |
| 2039 | soc->osdev, |
| 2040 | rx_tid->hw_qdesc_paddr, |
| 2041 | QDF_DMA_BIDIRECTIONAL, |
| 2042 | rx_tid->hw_qdesc_alloc_size); |
| 2043 | qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); |
| 2044 | rx_tid->hw_qdesc_vaddr_unaligned = NULL; |
| 2045 | } |
| 2046 | return err; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2047 | } |
| 2048 | |
Nisha Menon | 98c4dd4 | 2019-11-01 18:54:19 -0700 | [diff] [blame] | 2049 | #ifdef REO_DESC_DEFER_FREE |
| 2050 | /* |
| 2051 | * dp_reo_desc_clean_up() - If cmd to flush base desc fails add |
| 2052 | * desc back to freelist and defer the deletion |
| 2053 | * |
| 2054 | * @soc: DP SOC handle |
| 2055 | * @desc: Base descriptor to be freed |
| 2056 | * @reo_status: REO command status |
| 2057 | */ |
| 2058 | static void dp_reo_desc_clean_up(struct dp_soc *soc, |
| 2059 | struct reo_desc_list_node *desc, |
| 2060 | union hal_reo_status *reo_status) |
| 2061 | { |
| 2062 | desc->free_ts = qdf_get_system_timestamp(); |
| 2063 | DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); |
| 2064 | qdf_list_insert_back(&soc->reo_desc_freelist, |
| 2065 | (qdf_list_node_t *)desc); |
| 2066 | } |
| 2067 | |
| 2068 | #else |
| 2069 | /* |
| 2070 | * dp_reo_desc_clean_up() - If send cmd to REO inorder to flush |
| 2071 | * cache fails free the base REO desc anyway |
| 2072 | * |
| 2073 | * @soc: DP SOC handle |
| 2074 | * @desc: Base descriptor to be freed |
| 2075 | * @reo_status: REO command status |
| 2076 | */ |
| 2077 | static void dp_reo_desc_clean_up(struct dp_soc *soc, |
| 2078 | struct reo_desc_list_node *desc, |
| 2079 | union hal_reo_status *reo_status) |
| 2080 | { |
| 2081 | if (reo_status) { |
| 2082 | qdf_mem_zero(reo_status, sizeof(*reo_status)); |
| 2083 | reo_status->fl_cache_status.header.status = 0; |
| 2084 | dp_reo_desc_free(soc, (void *)desc, reo_status); |
| 2085 | } |
| 2086 | } |
| 2087 | #endif |
| 2088 | |
| 2089 | /* |
| 2090 | * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE |
| 2091 | * cmd and re-insert desc into free list if send fails. |
| 2092 | * |
| 2093 | * @soc: DP SOC handle |
| 2094 | * @desc: desc with resend update cmd flag set |
| 2095 | * @rx_tid: Desc RX tid associated with update cmd for resetting |
| 2096 | * valid field to 0 in h/w |
| 2097 | */ |
| 2098 | static void dp_resend_update_reo_cmd(struct dp_soc *soc, |
| 2099 | struct reo_desc_list_node *desc, |
| 2100 | struct dp_rx_tid *rx_tid) |
| 2101 | { |
| 2102 | struct hal_reo_cmd_params params; |
| 2103 | |
| 2104 | qdf_mem_zero(¶ms, sizeof(params)); |
| 2105 | params.std.need_status = 1; |
| 2106 | params.std.addr_lo = |
| 2107 | rx_tid->hw_qdesc_paddr & 0xffffffff; |
| 2108 | params.std.addr_hi = |
| 2109 | (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; |
| 2110 | params.u.upd_queue_params.update_vld = 1; |
| 2111 | params.u.upd_queue_params.vld = 0; |
| 2112 | desc->resend_update_reo_cmd = false; |
| 2113 | /* |
| 2114 | * If the cmd send fails then set resend_update_reo_cmd flag |
| 2115 | * and insert the desc at the end of the free list to retry. |
| 2116 | */ |
| 2117 | if (dp_reo_send_cmd(soc, |
| 2118 | CMD_UPDATE_RX_REO_QUEUE, |
| 2119 | ¶ms, |
| 2120 | dp_rx_tid_delete_cb, |
| 2121 | (void *)desc) |
| 2122 | != QDF_STATUS_SUCCESS) { |
| 2123 | desc->resend_update_reo_cmd = true; |
| 2124 | desc->free_ts = qdf_get_system_timestamp(); |
| 2125 | qdf_list_insert_back(&soc->reo_desc_freelist, |
| 2126 | (qdf_list_node_t *)desc); |
Rakesh Pillai | ae0f601 | 2020-01-02 11:03:09 +0530 | [diff] [blame] | 2127 | dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE"); |
Nisha Menon | 98c4dd4 | 2019-11-01 18:54:19 -0700 | [diff] [blame] | 2128 | DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); |
| 2129 | } |
| 2130 | } |
| 2131 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2132 | /* |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 2133 | * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache |
| 2134 | * after deleting the entries (ie., setting valid=0) |
| 2135 | * |
| 2136 | * @soc: DP SOC handle |
| 2137 | * @cb_ctxt: Callback context |
| 2138 | * @reo_status: REO command status |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2139 | */ |
Nisha Menon | 98c4dd4 | 2019-11-01 18:54:19 -0700 | [diff] [blame] | 2140 | void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt, |
| 2141 | union hal_reo_status *reo_status) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2142 | { |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 2143 | struct reo_desc_list_node *freedesc = |
| 2144 | (struct reo_desc_list_node *)cb_ctxt; |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 2145 | uint32_t list_size; |
| 2146 | struct reo_desc_list_node *desc; |
| 2147 | unsigned long curr_ts = qdf_get_system_timestamp(); |
| 2148 | uint32_t desc_size, tot_desc_size; |
| 2149 | struct hal_reo_cmd_params params; |
| 2150 | |
Karunakar Dasineni | 31b98d4 | 2018-02-27 23:05:08 -0800 | [diff] [blame] | 2151 | if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) { |
| 2152 | qdf_mem_zero(reo_status, sizeof(*reo_status)); |
| 2153 | reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN; |
| 2154 | dp_reo_desc_free(soc, (void *)freedesc, reo_status); |
| 2155 | return; |
| 2156 | } else if (reo_status->rx_queue_status.header.status != |
| 2157 | HAL_REO_CMD_SUCCESS) { |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2158 | /* Should not happen normally. Just print error for now */ |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 2159 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 2160 | "%s: Rx tid HW desc deletion failed(%d): tid %d", |
| 2161 | __func__, |
| 2162 | reo_status->rx_queue_status.header.status, |
| 2163 | freedesc->rx_tid.tid); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2164 | } |
| 2165 | |
Houston Hoffman | 41b912c | 2017-08-30 14:27:51 -0700 | [diff] [blame] | 2166 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, |
Aditya Sathish | ded018e | 2018-07-02 16:25:21 +0530 | [diff] [blame] | 2167 | "%s: rx_tid: %d status: %d", __func__, |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 2168 | freedesc->rx_tid.tid, |
| 2169 | reo_status->rx_queue_status.header.status); |
Krishna Kumaar Natarajan | 1741dc4 | 2017-01-26 19:24:48 -0800 | [diff] [blame] | 2170 | |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 2171 | qdf_spin_lock_bh(&soc->reo_desc_freelist_lock); |
| 2172 | freedesc->free_ts = curr_ts; |
| 2173 | qdf_list_insert_back_size(&soc->reo_desc_freelist, |
| 2174 | (qdf_list_node_t *)freedesc, &list_size); |
| 2175 | |
| 2176 | while ((qdf_list_peek_front(&soc->reo_desc_freelist, |
| 2177 | (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) && |
| 2178 | ((list_size >= REO_DESC_FREELIST_SIZE) || |
Nisha Menon | 98c4dd4 | 2019-11-01 18:54:19 -0700 | [diff] [blame] | 2179 | (curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) || |
| 2180 | (desc->resend_update_reo_cmd && list_size))) { |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 2181 | struct dp_rx_tid *rx_tid; |
| 2182 | |
| 2183 | qdf_list_remove_front(&soc->reo_desc_freelist, |
| 2184 | (qdf_list_node_t **)&desc); |
| 2185 | list_size--; |
| 2186 | rx_tid = &desc->rx_tid; |
| 2187 | |
Nisha Menon | 98c4dd4 | 2019-11-01 18:54:19 -0700 | [diff] [blame] | 2188 | /* First process descs with resend_update_reo_cmd set */ |
| 2189 | if (desc->resend_update_reo_cmd) { |
| 2190 | dp_resend_update_reo_cmd(soc, desc, rx_tid); |
| 2191 | continue; |
| 2192 | } |
| 2193 | |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 2194 | /* Flush and invalidate REO descriptor from HW cache: Base and |
| 2195 | * extension descriptors should be flushed separately */ |
Karunakar Dasineni | 26ebbe4 | 2018-05-31 07:59:10 -0700 | [diff] [blame] | 2196 | tot_desc_size = rx_tid->hw_qdesc_alloc_size; |
| 2197 | /* Get base descriptor size by passing non-qos TID */ |
| 2198 | desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0, |
| 2199 | DP_NON_QOS_TID); |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 2200 | |
| 2201 | /* Flush reo extension descriptors */ |
| 2202 | while ((tot_desc_size -= desc_size) > 0) { |
| 2203 | qdf_mem_zero(¶ms, sizeof(params)); |
| 2204 | params.std.addr_lo = |
| 2205 | ((uint64_t)(rx_tid->hw_qdesc_paddr) + |
| 2206 | tot_desc_size) & 0xffffffff; |
| 2207 | params.std.addr_hi = |
| 2208 | (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; |
| 2209 | |
| 2210 | if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc, |
| 2211 | CMD_FLUSH_CACHE, |
| 2212 | ¶ms, |
| 2213 | NULL, |
| 2214 | NULL)) { |
Rakesh Pillai | ae0f601 | 2020-01-02 11:03:09 +0530 | [diff] [blame] | 2215 | dp_err_rl("fail to send CMD_CACHE_FLUSH:" |
| 2216 | "tid %d desc %pK", rx_tid->tid, |
| 2217 | (void *)(rx_tid->hw_qdesc_paddr)); |
| 2218 | DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 2219 | } |
| 2220 | } |
| 2221 | |
| 2222 | /* Flush base descriptor */ |
| 2223 | qdf_mem_zero(¶ms, sizeof(params)); |
| 2224 | params.std.need_status = 1; |
| 2225 | params.std.addr_lo = |
| 2226 | (uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff; |
| 2227 | params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; |
| 2228 | |
| 2229 | if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc, |
| 2230 | CMD_FLUSH_CACHE, |
| 2231 | ¶ms, |
| 2232 | dp_reo_desc_free, |
| 2233 | (void *)desc)) { |
| 2234 | union hal_reo_status reo_status; |
| 2235 | /* |
| 2236 | * If dp_reo_send_cmd return failure, related TID queue desc |
| 2237 | * should be unmapped. Also locally reo_desc, together with |
| 2238 | * TID queue desc also need to be freed accordingly. |
| 2239 | * |
| 2240 | * Here invoke desc_free function directly to do clean up. |
Nisha Menon | 98c4dd4 | 2019-11-01 18:54:19 -0700 | [diff] [blame] | 2241 | * |
| 2242 | * In case of MCL path add the desc back to the free |
| 2243 | * desc list and defer deletion. |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 2244 | */ |
Venkata Sharath Chandra Manchala | ea6518b | 2019-10-25 18:03:25 -0700 | [diff] [blame] | 2245 | dp_err_log("%s: fail to send REO cmd to flush cache: tid %d", |
| 2246 | __func__, rx_tid->tid); |
Nisha Menon | 98c4dd4 | 2019-11-01 18:54:19 -0700 | [diff] [blame] | 2247 | dp_reo_desc_clean_up(soc, desc, &reo_status); |
Rakesh Pillai | ae0f601 | 2020-01-02 11:03:09 +0530 | [diff] [blame] | 2248 | DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 2249 | } |
| 2250 | } |
| 2251 | qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2252 | } |
| 2253 | |
| 2254 | /* |
| 2255 | * dp_rx_tid_delete_wifi3() – Delete receive TID queue |
| 2256 | * @peer: Datapath peer handle |
| 2257 | * @tid: TID |
| 2258 | * |
| 2259 | * Return: 0 on success, error code on failure |
| 2260 | */ |
Jeff Johnson | 416168b | 2017-01-06 09:42:43 -0800 | [diff] [blame] | 2261 | static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2262 | { |
Karunakar Dasineni | a0f09ea | 2016-11-21 17:41:31 -0800 | [diff] [blame] | 2263 | struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]); |
| 2264 | struct dp_soc *soc = peer->vdev->pdev->soc; |
| 2265 | struct hal_reo_cmd_params params; |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 2266 | struct reo_desc_list_node *freedesc = |
| 2267 | qdf_mem_malloc(sizeof(*freedesc)); |
Lin Bai | fca7640 | 2017-12-11 15:03:49 +0800 | [diff] [blame] | 2268 | |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 2269 | if (!freedesc) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 2270 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 2271 | "%s: malloc failed for freedesc: tid %d", |
| 2272 | __func__, tid); |
Karunakar Dasineni | a8c779b | 2017-01-11 13:57:55 -0800 | [diff] [blame] | 2273 | return -ENOMEM; |
| 2274 | } |
| 2275 | |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 2276 | freedesc->rx_tid = *rx_tid; |
Nisha Menon | 98c4dd4 | 2019-11-01 18:54:19 -0700 | [diff] [blame] | 2277 | freedesc->resend_update_reo_cmd = false; |
Karunakar Dasineni | a0f09ea | 2016-11-21 17:41:31 -0800 | [diff] [blame] | 2278 | |
| 2279 | qdf_mem_zero(¶ms, sizeof(params)); |
| 2280 | |
Karunakar Dasineni | 6a52675 | 2018-08-02 08:56:19 -0700 | [diff] [blame] | 2281 | params.std.need_status = 1; |
Karunakar Dasineni | a0f09ea | 2016-11-21 17:41:31 -0800 | [diff] [blame] | 2282 | params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff; |
| 2283 | params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; |
| 2284 | params.u.upd_queue_params.update_vld = 1; |
| 2285 | params.u.upd_queue_params.vld = 0; |
| 2286 | |
Nisha Menon | 98c4dd4 | 2019-11-01 18:54:19 -0700 | [diff] [blame] | 2287 | if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, ¶ms, |
| 2288 | dp_rx_tid_delete_cb, (void *)freedesc) |
| 2289 | != QDF_STATUS_SUCCESS) { |
| 2290 | /* Defer the clean up to the call back context */ |
| 2291 | qdf_spin_lock_bh(&soc->reo_desc_freelist_lock); |
| 2292 | freedesc->free_ts = qdf_get_system_timestamp(); |
| 2293 | freedesc->resend_update_reo_cmd = true; |
| 2294 | qdf_list_insert_front(&soc->reo_desc_freelist, |
| 2295 | (qdf_list_node_t *)freedesc); |
| 2296 | DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); |
| 2297 | qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock); |
| 2298 | dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE"); |
| 2299 | } |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 2300 | |
Karunakar Dasineni | a8c779b | 2017-01-11 13:57:55 -0800 | [diff] [blame] | 2301 | rx_tid->hw_qdesc_vaddr_unaligned = NULL; |
| 2302 | rx_tid->hw_qdesc_alloc_size = 0; |
Karunakar Dasineni | 8bebb00 | 2017-02-09 22:15:23 -0800 | [diff] [blame] | 2303 | rx_tid->hw_qdesc_paddr = 0; |
Karunakar Dasineni | a8c779b | 2017-01-11 13:57:55 -0800 | [diff] [blame] | 2304 | |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 2305 | return 0; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2306 | } |
| 2307 | |
Pramod Simha | b17d067 | 2017-03-06 17:20:13 -0800 | [diff] [blame] | 2308 | #ifdef DP_LFR |
| 2309 | static void dp_peer_setup_remaining_tids(struct dp_peer *peer) |
| 2310 | { |
| 2311 | int tid; |
| 2312 | |
| 2313 | for (tid = 1; tid < DP_MAX_TIDS-1; tid++) { |
| 2314 | dp_rx_tid_setup_wifi3(peer, tid, 1, 0); |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 2315 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
| 2316 | "Setting up TID %d for peer %pK peer->local_id %d", |
| 2317 | tid, peer, peer->local_id); |
Pramod Simha | b17d067 | 2017-03-06 17:20:13 -0800 | [diff] [blame] | 2318 | } |
| 2319 | } |
| 2320 | #else |
| 2321 | static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}; |
| 2322 | #endif |
nobelj | debe2b3 | 2019-04-23 11:18:47 -0700 | [diff] [blame] | 2323 | |
#ifndef WLAN_TX_PKT_CAPTURE_ENH
/*
 * dp_peer_tid_queue_init() – Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 * No-op stub: the real implementation exists only when
 * WLAN_TX_PKT_CAPTURE_ENH is enabled.
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/*
 * dp_peer_tid_queue_cleanup() – remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 * No-op stub: the real implementation exists only when
 * WLAN_TX_PKT_CAPTURE_ENH is enabled.
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/*
 * dp_peer_update_80211_hdr() – dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 * No-op stub: the real implementation exists only when
 * WLAN_TX_PKT_CAPTURE_ENH is enabled.
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif
| 2354 | |
| 2355 | /* |
| 2356 | * dp_peer_tx_init() – Initialize receive TID state |
| 2357 | * @pdev: Datapath pdev |
| 2358 | * @peer: Datapath peer |
| 2359 | * |
| 2360 | */ |
| 2361 | void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer) |
| 2362 | { |
| 2363 | dp_peer_tid_queue_init(peer); |
| 2364 | dp_peer_update_80211_hdr(peer->vdev, peer); |
| 2365 | } |
| 2366 | |
| 2367 | /* |
| 2368 | * dp_peer_tx_cleanup() – Deinitialize receive TID state |
| 2369 | * @vdev: Datapath vdev |
| 2370 | * @peer: Datapath peer |
| 2371 | * |
| 2372 | */ |
| 2373 | static inline void |
| 2374 | dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer) |
| 2375 | { |
| 2376 | dp_peer_tid_queue_cleanup(peer); |
| 2377 | } |
| 2378 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2379 | /* |
| 2380 | * dp_peer_rx_init() – Initialize receive TID state |
| 2381 | * @pdev: Datapath pdev |
| 2382 | * @peer: Datapath peer |
| 2383 | * |
| 2384 | */ |
| 2385 | void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer) |
| 2386 | { |
| 2387 | int tid; |
| 2388 | struct dp_rx_tid *rx_tid; |
| 2389 | for (tid = 0; tid < DP_MAX_TIDS; tid++) { |
| 2390 | rx_tid = &peer->rx_tid[tid]; |
| 2391 | rx_tid->array = &rx_tid->base; |
| 2392 | rx_tid->base.head = rx_tid->base.tail = NULL; |
| 2393 | rx_tid->tid = tid; |
| 2394 | rx_tid->defrag_timeout_ms = 0; |
| 2395 | rx_tid->ba_win_size = 0; |
| 2396 | rx_tid->ba_status = DP_RX_BA_INACTIVE; |
| 2397 | |
| 2398 | rx_tid->defrag_waitlist_elem.tqe_next = NULL; |
| 2399 | rx_tid->defrag_waitlist_elem.tqe_prev = NULL; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2400 | } |
| 2401 | |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2402 | peer->active_ba_session_cnt = 0; |
| 2403 | peer->hw_buffer_size = 0; |
| 2404 | peer->kill_256_sessions = 0; |
| 2405 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2406 | /* Setup default (non-qos) rx tid queue */ |
| 2407 | dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0); |
Karunakar Dasineni | ed1de12 | 2016-08-02 11:57:59 -0700 | [diff] [blame] | 2408 | |
Karunakar Dasineni | a0f09ea | 2016-11-21 17:41:31 -0800 | [diff] [blame] | 2409 | /* Setup rx tid queue for TID 0. |
| 2410 | * Other queues will be setup on receiving first packet, which will cause |
| 2411 | * NULL REO queue error |
| 2412 | */ |
| 2413 | dp_rx_tid_setup_wifi3(peer, 0, 1, 0); |
| 2414 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2415 | /* |
Pramod Simha | b17d067 | 2017-03-06 17:20:13 -0800 | [diff] [blame] | 2416 | * Setup the rest of TID's to handle LFR |
| 2417 | */ |
| 2418 | dp_peer_setup_remaining_tids(peer); |
| 2419 | |
| 2420 | /* |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2421 | * Set security defaults: no PN check, no security. The target may |
| 2422 | * send a HTT SEC_IND message to overwrite these defaults. |
| 2423 | */ |
| 2424 | peer->security[dp_sec_ucast].sec_type = |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 2425 | peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2426 | } |
| 2427 | |
| 2428 | /* |
| 2429 | * dp_peer_rx_cleanup() – Cleanup receive TID state |
| 2430 | * @vdev: Datapath vdev |
| 2431 | * @peer: Datapath peer |
Sravan Kumar Kairam | 1e8591a | 2019-08-07 20:06:52 +0530 | [diff] [blame] | 2432 | * @reuse: Peer reference reuse |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2433 | * |
| 2434 | */ |
Sravan Kumar Kairam | 1e8591a | 2019-08-07 20:06:52 +0530 | [diff] [blame] | 2435 | void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2436 | { |
| 2437 | int tid; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2438 | uint32_t tid_delete_mask = 0; |
Venkata Sharath Chandra Manchala | 7f30b27 | 2018-08-22 16:04:19 -0700 | [diff] [blame] | 2439 | |
Saket Jha | ce8c310 | 2019-10-10 19:48:09 -0700 | [diff] [blame] | 2440 | dp_info("Remove tids for peer: %pK", peer); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2441 | for (tid = 0; tid < DP_MAX_TIDS; tid++) { |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2442 | struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; |
| 2443 | |
| 2444 | qdf_spin_lock_bh(&rx_tid->tid_lock); |
Saket Jha | ce8c310 | 2019-10-10 19:48:09 -0700 | [diff] [blame] | 2445 | if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) { |
Lin Bai | f1c577e | 2018-05-22 20:45:42 +0800 | [diff] [blame] | 2446 | /* Cleanup defrag related resource */ |
| 2447 | dp_rx_defrag_waitlist_remove(peer, tid); |
| 2448 | dp_rx_reorder_flush_frag(peer, tid); |
Karunakar Dasineni | f8ec0cb | 2019-01-29 13:07:05 -0800 | [diff] [blame] | 2449 | } |
| 2450 | |
| 2451 | if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) { |
| 2452 | dp_rx_tid_delete_wifi3(peer, tid); |
Lin Bai | f1c577e | 2018-05-22 20:45:42 +0800 | [diff] [blame] | 2453 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2454 | tid_delete_mask |= (1 << tid); |
| 2455 | } |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2456 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2457 | } |
| 2458 | #ifdef notyet /* See if FW can remove queues as part of peer cleanup */ |
| 2459 | if (soc->ol_ops->peer_rx_reorder_queue_remove) { |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 2460 | soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc, |
| 2461 | peer->vdev->pdev->pdev_id, |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2462 | peer->vdev->vdev_id, peer->mac_addr.raw, |
| 2463 | tid_delete_mask); |
| 2464 | } |
| 2465 | #endif |
Sravan Kumar Kairam | 1e8591a | 2019-08-07 20:06:52 +0530 | [diff] [blame] | 2466 | if (!reuse) |
| 2467 | for (tid = 0; tid < DP_MAX_TIDS; tid++) |
| 2468 | qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2469 | } |
| 2470 | |
nobelj | 7b0e273 | 2019-05-31 00:19:07 -0700 | [diff] [blame] | 2471 | #ifdef FEATURE_PERPKT_INFO |
| 2472 | /* |
| 2473 | * dp_peer_ppdu_delayed_ba_init() Initialize ppdu in peer |
| 2474 | * @peer: Datapath peer |
| 2475 | * |
| 2476 | * return: void |
| 2477 | */ |
| 2478 | void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer) |
| 2479 | { |
| 2480 | qdf_mem_zero(&peer->delayed_ba_ppdu_stats, |
| 2481 | sizeof(struct cdp_delayed_tx_completion_ppdu_user)); |
| 2482 | peer->last_delayed_ba = false; |
| 2483 | peer->last_delayed_ba_ppduid = 0; |
| 2484 | } |
| 2485 | #else |
| 2486 | /* |
| 2487 | * dp_peer_ppdu_delayed_ba_init() Initialize ppdu in peer |
| 2488 | * @peer: Datapath peer |
| 2489 | * |
| 2490 | * return: void |
| 2491 | */ |
| 2492 | void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer) |
| 2493 | { |
| 2494 | } |
| 2495 | #endif |
| 2496 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2497 | /* |
Krishna Kumaar Natarajan | d684ba2 | 2017-01-25 15:48:43 -0800 | [diff] [blame] | 2498 | * dp_peer_cleanup() – Cleanup peer information |
| 2499 | * @vdev: Datapath vdev |
| 2500 | * @peer: Datapath peer |
Sravan Kumar Kairam | 1e8591a | 2019-08-07 20:06:52 +0530 | [diff] [blame] | 2501 | * @reuse: Peer reference reuse |
Krishna Kumaar Natarajan | d684ba2 | 2017-01-25 15:48:43 -0800 | [diff] [blame] | 2502 | * |
| 2503 | */ |
Sravan Kumar Kairam | 1e8591a | 2019-08-07 20:06:52 +0530 | [diff] [blame] | 2504 | void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse) |
Krishna Kumaar Natarajan | d684ba2 | 2017-01-25 15:48:43 -0800 | [diff] [blame] | 2505 | { |
nobelj | debe2b3 | 2019-04-23 11:18:47 -0700 | [diff] [blame] | 2506 | dp_peer_tx_cleanup(vdev, peer); |
| 2507 | |
Krishna Kumaar Natarajan | d684ba2 | 2017-01-25 15:48:43 -0800 | [diff] [blame] | 2508 | /* cleanup the Rx reorder queues for this peer */ |
Sravan Kumar Kairam | 1e8591a | 2019-08-07 20:06:52 +0530 | [diff] [blame] | 2509 | dp_peer_rx_cleanup(vdev, peer, reuse); |
Krishna Kumaar Natarajan | d684ba2 | 2017-01-25 15:48:43 -0800 | [diff] [blame] | 2510 | } |
| 2511 | |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2512 | /* dp_teardown_256_ba_session() - Teardown sessions using 256 |
| 2513 | * window size when a request with |
| 2514 | * 64 window size is received. |
| 2515 | * This is done as a WAR since HW can |
| 2516 | * have only one setting per peer (64 or 256). |
sumedh baikady | 61cbe85 | 2018-10-09 11:04:34 -0700 | [diff] [blame] | 2517 | * For HKv2, we use per tid buffersize setting |
| 2518 | * for 0 to per_tid_basize_max_tid. For tid |
| 2519 | * more than per_tid_basize_max_tid we use HKv1 |
| 2520 | * method. |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2521 | * @peer: Datapath peer |
| 2522 | * |
| 2523 | * Return: void |
| 2524 | */ |
| 2525 | static void dp_teardown_256_ba_sessions(struct dp_peer *peer) |
| 2526 | { |
| 2527 | uint8_t delba_rcode = 0; |
| 2528 | int tid; |
| 2529 | struct dp_rx_tid *rx_tid = NULL; |
| 2530 | |
sumedh baikady | 61cbe85 | 2018-10-09 11:04:34 -0700 | [diff] [blame] | 2531 | tid = peer->vdev->pdev->soc->per_tid_basize_max_tid; |
| 2532 | for (; tid < DP_MAX_TIDS; tid++) { |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2533 | rx_tid = &peer->rx_tid[tid]; |
| 2534 | qdf_spin_lock_bh(&rx_tid->tid_lock); |
| 2535 | |
| 2536 | if (rx_tid->ba_win_size <= 64) { |
| 2537 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
| 2538 | continue; |
| 2539 | } else { |
| 2540 | if (rx_tid->ba_status == DP_RX_BA_ACTIVE || |
| 2541 | rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { |
| 2542 | /* send delba */ |
| 2543 | if (!rx_tid->delba_tx_status) { |
| 2544 | rx_tid->delba_tx_retry++; |
| 2545 | rx_tid->delba_tx_status = 1; |
| 2546 | rx_tid->delba_rcode = |
| 2547 | IEEE80211_REASON_QOS_SETUP_REQUIRED; |
| 2548 | delba_rcode = rx_tid->delba_rcode; |
| 2549 | |
| 2550 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Venkata Sharath Chandra Manchala | a6c0470 | 2019-06-20 15:27:58 -0700 | [diff] [blame] | 2551 | if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba) |
| 2552 | peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba( |
Pavankumar Nandeshwar | 715fdc3 | 2019-10-03 20:51:01 +0530 | [diff] [blame] | 2553 | peer->vdev->pdev->soc->ctrl_psoc, |
| 2554 | peer->vdev->vdev_id, |
| 2555 | peer->mac_addr.raw, |
| 2556 | tid, delba_rcode); |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2557 | } else { |
| 2558 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
| 2559 | } |
| 2560 | } else { |
| 2561 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
| 2562 | } |
| 2563 | } |
| 2564 | } |
| 2565 | } |
| 2566 | |
Krishna Kumaar Natarajan | d684ba2 | 2017-01-25 15:48:43 -0800 | [diff] [blame] | 2567 | /* |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2568 | * dp_rx_addba_resp_tx_completion_wifi3() – Update Rx Tid State |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2569 | * |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2570 | * @soc: Datapath soc handle |
| 2571 | * @peer_mac: Datapath peer mac address |
| 2572 | * @vdev_id: id of atapath vdev |
Karunakar Dasineni | ed1de12 | 2016-08-02 11:57:59 -0700 | [diff] [blame] | 2573 | * @tid: TID number |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2574 | * @status: tx completion status |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2575 | * Return: 0 on success, error code on failure |
| 2576 | */ |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2577 | int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, |
| 2578 | uint8_t *peer_mac, |
| 2579 | uint16_t vdev_id, |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2580 | uint8_t tid, int status) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2581 | { |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2582 | struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, |
| 2583 | peer_mac, 0, vdev_id); |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2584 | struct dp_rx_tid *rx_tid = NULL; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2585 | |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2586 | if (!peer || peer->delete_in_progress) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 2587 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2588 | "%s: Peer is NULL!\n", __func__); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2589 | goto fail; |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2590 | } |
| 2591 | rx_tid = &peer->rx_tid[tid]; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2592 | qdf_spin_lock_bh(&rx_tid->tid_lock); |
| 2593 | if (status) { |
| 2594 | rx_tid->num_addba_rsp_failed++; |
Tiger Yu | 1e974a9 | 2019-12-09 10:38:08 +0800 | [diff] [blame] | 2595 | dp_rx_tid_update_wifi3(peer, tid, 1, |
| 2596 | IEEE80211_SEQ_MAX); |
| 2597 | rx_tid->ba_status = DP_RX_BA_INACTIVE; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2598 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Varun Reddy Yeturu | 8119e12 | 2019-07-27 14:14:02 -0700 | [diff] [blame] | 2599 | dp_err("RxTid- %d addba rsp tx completion failed", tid); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2600 | |
| 2601 | goto success; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2602 | } |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2603 | |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2604 | rx_tid->num_addba_rsp_success++; |
| 2605 | if (rx_tid->ba_status == DP_RX_BA_INACTIVE) { |
| 2606 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 2607 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2608 | "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS", |
| 2609 | __func__, tid); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2610 | goto fail; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2611 | } |
| 2612 | |
Tallapragada Kalyan | 8c93d5d | 2018-05-28 05:02:53 +0530 | [diff] [blame] | 2613 | if (!qdf_atomic_read(&peer->is_default_route_set)) { |
| 2614 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 2615 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
Tallapragada Kalyan | 8c93d5d | 2018-05-28 05:02:53 +0530 | [diff] [blame] | 2616 | "%s: default route is not set for peer: %pM", |
| 2617 | __func__, peer->mac_addr.raw); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2618 | goto fail; |
Tallapragada Kalyan | 8c93d5d | 2018-05-28 05:02:53 +0530 | [diff] [blame] | 2619 | } |
| 2620 | |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2621 | /* First Session */ |
| 2622 | if (peer->active_ba_session_cnt == 0) { |
| 2623 | if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256) |
| 2624 | peer->hw_buffer_size = 256; |
| 2625 | else |
| 2626 | peer->hw_buffer_size = 64; |
| 2627 | } |
| 2628 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2629 | rx_tid->ba_status = DP_RX_BA_ACTIVE; |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2630 | |
| 2631 | peer->active_ba_session_cnt++; |
| 2632 | |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2633 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2634 | |
| 2635 | /* Kill any session having 256 buffer size |
| 2636 | * when 64 buffer size request is received. |
| 2637 | * Also, latch on to 64 as new buffer size. |
| 2638 | */ |
| 2639 | if (peer->kill_256_sessions) { |
| 2640 | dp_teardown_256_ba_sessions(peer); |
| 2641 | peer->kill_256_sessions = 0; |
| 2642 | } |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2643 | |
| 2644 | success: |
| 2645 | dp_peer_unref_delete(peer); |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2646 | return QDF_STATUS_SUCCESS; |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2647 | |
| 2648 | fail: |
| 2649 | if (peer) |
| 2650 | dp_peer_unref_delete(peer); |
| 2651 | |
| 2652 | return QDF_STATUS_E_FAILURE; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2653 | } |
| 2654 | |
| 2655 | /* |
| 2656 | * dp_rx_addba_responsesetup_wifi3() – Process ADDBA request from peer |
| 2657 | * |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2658 | * @soc: Datapath soc handle |
| 2659 | * @peer_mac: Datapath peer mac address |
| 2660 | * @vdev_id: id of atapath vdev |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2661 | * @tid: TID number |
| 2662 | * @dialogtoken: output dialogtoken |
| 2663 | * @statuscode: output dialogtoken |
Jeff Johnson | ff2dfb2 | 2018-05-12 10:27:57 -0700 | [diff] [blame] | 2664 | * @buffersize: Output BA window size |
| 2665 | * @batimeout: Output BA timeout |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2666 | */ |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2667 | QDF_STATUS |
| 2668 | dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, |
| 2669 | uint16_t vdev_id, uint8_t tid, |
| 2670 | uint8_t *dialogtoken, uint16_t *statuscode, |
| 2671 | uint16_t *buffersize, uint16_t *batimeout) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2672 | { |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2673 | struct dp_rx_tid *rx_tid = NULL; |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2674 | QDF_STATUS status = QDF_STATUS_SUCCESS; |
| 2675 | struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, |
| 2676 | peer_mac, 0, vdev_id); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2677 | |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2678 | if (!peer || peer->delete_in_progress) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 2679 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2680 | "%s: Peer is NULL!\n", __func__); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2681 | status = QDF_STATUS_E_FAILURE; |
| 2682 | goto fail; |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2683 | } |
| 2684 | rx_tid = &peer->rx_tid[tid]; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2685 | qdf_spin_lock_bh(&rx_tid->tid_lock); |
sumedh baikady | e3947bd | 2017-11-29 19:19:25 -0800 | [diff] [blame] | 2686 | rx_tid->num_of_addba_resp++; |
Jeff Johnson | 97a1cc5 | 2018-05-06 15:28:56 -0700 | [diff] [blame] | 2687 | /* setup ADDBA response parameters */ |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2688 | *dialogtoken = rx_tid->dialogtoken; |
| 2689 | *statuscode = rx_tid->statuscode; |
Karunakar Dasineni | ed1de12 | 2016-08-02 11:57:59 -0700 | [diff] [blame] | 2690 | *buffersize = rx_tid->ba_win_size; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2691 | *batimeout = 0; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2692 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2693 | |
| 2694 | fail: |
| 2695 | if (peer) |
| 2696 | dp_peer_unref_delete(peer); |
| 2697 | |
| 2698 | return status; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2699 | } |
| 2700 | |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2701 | /* dp_check_ba_buffersize() - Check buffer size in request |
| 2702 | * and latch onto this size based on |
| 2703 | * size used in first active session. |
| 2704 | * @peer: Datapath peer |
| 2705 | * @tid: Tid |
| 2706 | * @buffersize: Block ack window size |
| 2707 | * |
| 2708 | * Return: void |
| 2709 | */ |
| 2710 | static void dp_check_ba_buffersize(struct dp_peer *peer, |
| 2711 | uint16_t tid, |
| 2712 | uint16_t buffersize) |
| 2713 | { |
| 2714 | struct dp_rx_tid *rx_tid = NULL; |
| 2715 | |
| 2716 | rx_tid = &peer->rx_tid[tid]; |
sumedh baikady | 61cbe85 | 2018-10-09 11:04:34 -0700 | [diff] [blame] | 2717 | if (peer->vdev->pdev->soc->per_tid_basize_max_tid && |
| 2718 | tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) { |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2719 | rx_tid->ba_win_size = buffersize; |
sumedh baikady | 61cbe85 | 2018-10-09 11:04:34 -0700 | [diff] [blame] | 2720 | return; |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2721 | } else { |
sumedh baikady | 61cbe85 | 2018-10-09 11:04:34 -0700 | [diff] [blame] | 2722 | if (peer->active_ba_session_cnt == 0) { |
| 2723 | rx_tid->ba_win_size = buffersize; |
| 2724 | } else { |
| 2725 | if (peer->hw_buffer_size == 64) { |
| 2726 | if (buffersize <= 64) |
| 2727 | rx_tid->ba_win_size = buffersize; |
| 2728 | else |
| 2729 | rx_tid->ba_win_size = peer->hw_buffer_size; |
| 2730 | } else if (peer->hw_buffer_size == 256) { |
| 2731 | if (buffersize > 64) { |
| 2732 | rx_tid->ba_win_size = buffersize; |
| 2733 | } else { |
| 2734 | rx_tid->ba_win_size = buffersize; |
| 2735 | peer->hw_buffer_size = 64; |
| 2736 | peer->kill_256_sessions = 1; |
| 2737 | } |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2738 | } |
| 2739 | } |
| 2740 | } |
| 2741 | } |
| 2742 | |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2743 | /* |
| 2744 | * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer |
| 2745 | * |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2746 | * @soc: Datapath soc handle |
| 2747 | * @peer_mac: Datapath peer mac address |
| 2748 | * @vdev_id: id of atapath vdev |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2749 | * @dialogtoken: dialogtoken from ADDBA frame |
| 2750 | * @tid: TID number |
| 2751 | * @batimeout: BA timeout |
| 2752 | * @buffersize: BA window size |
| 2753 | * @startseqnum: Start seq. number received in BA sequence control |
| 2754 | * |
| 2755 | * Return: 0 on success, error code on failure |
| 2756 | */ |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2757 | int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc, |
| 2758 | uint8_t *peer_mac, |
| 2759 | uint16_t vdev_id, |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2760 | uint8_t dialogtoken, |
| 2761 | uint16_t tid, uint16_t batimeout, |
| 2762 | uint16_t buffersize, |
| 2763 | uint16_t startseqnum) |
| 2764 | { |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2765 | QDF_STATUS status = QDF_STATUS_SUCCESS; |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2766 | struct dp_rx_tid *rx_tid = NULL; |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2767 | struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, |
| 2768 | peer_mac, 0, vdev_id); |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2769 | |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2770 | if (!peer || peer->delete_in_progress) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 2771 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2772 | "%s: Peer is NULL!\n", __func__); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2773 | status = QDF_STATUS_E_FAILURE; |
| 2774 | goto fail; |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2775 | } |
| 2776 | rx_tid = &peer->rx_tid[tid]; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2777 | qdf_spin_lock_bh(&rx_tid->tid_lock); |
| 2778 | rx_tid->num_of_addba_req++; |
| 2779 | if ((rx_tid->ba_status == DP_RX_BA_ACTIVE && |
sumedh baikady | 6d66c7e | 2019-02-28 15:20:18 -0800 | [diff] [blame] | 2780 | rx_tid->hw_qdesc_vaddr_unaligned)) { |
sumedh baikady | c773848 | 2019-04-02 18:14:46 -0700 | [diff] [blame] | 2781 | dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2782 | rx_tid->ba_status = DP_RX_BA_INACTIVE; |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2783 | peer->active_ba_session_cnt--; |
sumedh baikady | c773848 | 2019-04-02 18:14:46 -0700 | [diff] [blame] | 2784 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2785 | "%s: Rx Tid- %d hw qdesc is already setup", |
| 2786 | __func__, tid); |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2787 | } |
| 2788 | |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2789 | if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { |
| 2790 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2791 | status = QDF_STATUS_E_FAILURE; |
| 2792 | goto fail; |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2793 | } |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2794 | dp_check_ba_buffersize(peer, tid, buffersize); |
| 2795 | |
sumedh baikady | 61cbe85 | 2018-10-09 11:04:34 -0700 | [diff] [blame] | 2796 | if (dp_rx_tid_setup_wifi3(peer, tid, |
| 2797 | rx_tid->ba_win_size, startseqnum)) { |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2798 | rx_tid->ba_status = DP_RX_BA_INACTIVE; |
| 2799 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2800 | status = QDF_STATUS_E_FAILURE; |
| 2801 | goto fail; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2802 | } |
| 2803 | rx_tid->ba_status = DP_RX_BA_IN_PROGRESS; |
| 2804 | |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2805 | rx_tid->dialogtoken = dialogtoken; |
| 2806 | rx_tid->startseqnum = startseqnum; |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2807 | |
| 2808 | if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS) |
| 2809 | rx_tid->statuscode = rx_tid->userstatuscode; |
| 2810 | else |
| 2811 | rx_tid->statuscode = IEEE80211_STATUS_SUCCESS; |
| 2812 | |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2813 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2814 | |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2815 | fail: |
| 2816 | if (peer) |
| 2817 | dp_peer_unref_delete(peer); |
| 2818 | |
| 2819 | return status; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2820 | } |
| 2821 | |
| 2822 | /* |
Gyanranjan Hazarika | 99a58d3 | 2017-12-22 21:56:17 -0800 | [diff] [blame] | 2823 | * dp_set_addba_response() – Set a user defined ADDBA response status code |
| 2824 | * |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2825 | * @soc: Datapath soc handle |
| 2826 | * @peer_mac: Datapath peer mac address |
| 2827 | * @vdev_id: id of atapath vdev |
Gyanranjan Hazarika | 99a58d3 | 2017-12-22 21:56:17 -0800 | [diff] [blame] | 2828 | * @tid: TID number |
| 2829 | * @statuscode: response status code to be set |
| 2830 | */ |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2831 | QDF_STATUS |
| 2832 | dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, |
| 2833 | uint16_t vdev_id, uint8_t tid, uint16_t statuscode) |
Gyanranjan Hazarika | 99a58d3 | 2017-12-22 21:56:17 -0800 | [diff] [blame] | 2834 | { |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2835 | struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, |
| 2836 | peer_mac, 0, vdev_id); |
| 2837 | struct dp_rx_tid *rx_tid; |
| 2838 | QDF_STATUS status = QDF_STATUS_SUCCESS; |
Gyanranjan Hazarika | 99a58d3 | 2017-12-22 21:56:17 -0800 | [diff] [blame] | 2839 | |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2840 | if (!peer || peer->delete_in_progress) { |
| 2841 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
| 2842 | "%s: Peer is NULL!\n", __func__); |
| 2843 | status = QDF_STATUS_E_FAILURE; |
| 2844 | goto fail; |
| 2845 | } |
| 2846 | |
| 2847 | rx_tid = &peer->rx_tid[tid]; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2848 | qdf_spin_lock_bh(&rx_tid->tid_lock); |
Gyanranjan Hazarika | 99a58d3 | 2017-12-22 21:56:17 -0800 | [diff] [blame] | 2849 | rx_tid->userstatuscode = statuscode; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2850 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2851 | fail: |
| 2852 | if (peer) |
| 2853 | dp_peer_unref_delete(peer); |
| 2854 | |
| 2855 | return status; |
Gyanranjan Hazarika | 99a58d3 | 2017-12-22 21:56:17 -0800 | [diff] [blame] | 2856 | } |
| 2857 | |
| 2858 | /* |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2859 | * dp_rx_delba_process_wifi3() – Process DELBA from peer |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2860 | * @soc: Datapath soc handle |
| 2861 | * @peer_mac: Datapath peer mac address |
| 2862 | * @vdev_id: id of atapath vdev |
Karunakar Dasineni | ed1de12 | 2016-08-02 11:57:59 -0700 | [diff] [blame] | 2863 | * @tid: TID number |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2864 | * @reasoncode: Reason code received in DELBA frame |
| 2865 | * |
| 2866 | * Return: 0 on success, error code on failure |
| 2867 | */ |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2868 | int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, |
| 2869 | uint16_t vdev_id, int tid, uint16_t reasoncode) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2870 | { |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2871 | QDF_STATUS status = QDF_STATUS_SUCCESS; |
| 2872 | struct dp_rx_tid *rx_tid; |
| 2873 | struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, |
| 2874 | peer_mac, 0, vdev_id); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2875 | |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2876 | if (!peer || peer->delete_in_progress) { |
| 2877 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
| 2878 | "%s: Peer is NULL!\n", __func__); |
| 2879 | status = QDF_STATUS_E_FAILURE; |
| 2880 | goto fail; |
| 2881 | } |
| 2882 | rx_tid = &peer->rx_tid[tid]; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2883 | qdf_spin_lock_bh(&rx_tid->tid_lock); |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2884 | if (rx_tid->ba_status == DP_RX_BA_INACTIVE || |
| 2885 | rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2886 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2887 | status = QDF_STATUS_E_FAILURE; |
| 2888 | goto fail; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2889 | } |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2890 | /* TODO: See if we can delete the existing REO queue descriptor and |
| 2891 | * replace with a new one without queue extenstion descript to save |
| 2892 | * memory |
| 2893 | */ |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2894 | rx_tid->delba_rcode = reasoncode; |
sumedh baikady | e3947bd | 2017-11-29 19:19:25 -0800 | [diff] [blame] | 2895 | rx_tid->num_of_delba_req++; |
sumedh baikady | c773848 | 2019-04-02 18:14:46 -0700 | [diff] [blame] | 2896 | dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2897 | |
| 2898 | rx_tid->ba_status = DP_RX_BA_INACTIVE; |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2899 | peer->active_ba_session_cnt--; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 2900 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2901 | fail: |
| 2902 | if (peer) |
| 2903 | dp_peer_unref_delete(peer); |
| 2904 | |
| 2905 | return status; |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 2906 | } |
| 2907 | |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2908 | /* |
| 2909 | * dp_rx_delba_tx_completion_wifi3() – Send Delba Request |
| 2910 | * |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2911 | * @soc: Datapath soc handle |
| 2912 | * @peer_mac: Datapath peer mac address |
| 2913 | * @vdev_id: id of atapath vdev |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2914 | * @tid: TID number |
| 2915 | * @status: tx completion status |
| 2916 | * Return: 0 on success, error code on failure |
| 2917 | */ |
| 2918 | |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2919 | int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, |
| 2920 | uint16_t vdev_id, |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2921 | uint8_t tid, int status) |
| 2922 | { |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2923 | QDF_STATUS ret = QDF_STATUS_SUCCESS; |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2924 | struct dp_rx_tid *rx_tid = NULL; |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2925 | struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, |
| 2926 | peer_mac, 0, vdev_id); |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2927 | |
| 2928 | if (!peer || peer->delete_in_progress) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 2929 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2930 | "%s: Peer is NULL!", __func__); |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2931 | ret = QDF_STATUS_E_FAILURE; |
| 2932 | goto end; |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2933 | } |
| 2934 | rx_tid = &peer->rx_tid[tid]; |
| 2935 | qdf_spin_lock_bh(&rx_tid->tid_lock); |
| 2936 | if (status) { |
| 2937 | rx_tid->delba_tx_fail_cnt++; |
| 2938 | if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) { |
| 2939 | rx_tid->delba_tx_retry = 0; |
| 2940 | rx_tid->delba_tx_status = 0; |
| 2941 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
| 2942 | } else { |
| 2943 | rx_tid->delba_tx_retry++; |
| 2944 | rx_tid->delba_tx_status = 1; |
| 2945 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Venkata Sharath Chandra Manchala | a6c0470 | 2019-06-20 15:27:58 -0700 | [diff] [blame] | 2946 | if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba) |
| 2947 | peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba( |
Pavankumar Nandeshwar | 715fdc3 | 2019-10-03 20:51:01 +0530 | [diff] [blame] | 2948 | peer->vdev->pdev->soc->ctrl_psoc, |
| 2949 | peer->vdev->vdev_id, |
| 2950 | peer->mac_addr.raw, tid, |
Venkata Sharath Chandra Manchala | a6c0470 | 2019-06-20 15:27:58 -0700 | [diff] [blame] | 2951 | rx_tid->delba_rcode); |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2952 | } |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2953 | goto end; |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2954 | } else { |
| 2955 | rx_tid->delba_tx_success_cnt++; |
| 2956 | rx_tid->delba_tx_retry = 0; |
| 2957 | rx_tid->delba_tx_status = 0; |
| 2958 | } |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2959 | if (rx_tid->ba_status == DP_RX_BA_ACTIVE) { |
sumedh baikady | c773848 | 2019-04-02 18:14:46 -0700 | [diff] [blame] | 2960 | dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2961 | rx_tid->ba_status = DP_RX_BA_INACTIVE; |
| 2962 | peer->active_ba_session_cnt--; |
| 2963 | } |
| 2964 | if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { |
sumedh baikady | c773848 | 2019-04-02 18:14:46 -0700 | [diff] [blame] | 2965 | dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 2966 | rx_tid->ba_status = DP_RX_BA_INACTIVE; |
| 2967 | } |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2968 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
| 2969 | |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2970 | end: |
| 2971 | if (peer) |
| 2972 | dp_peer_unref_delete(peer); |
| 2973 | |
| 2974 | return ret; |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 2975 | } |
| 2976 | |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 2977 | /** |
| 2978 | * dp_set_pn_check_wifi3() - enable PN check in REO for security |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2979 | * @soc: Datapath soc handle |
| 2980 | * @peer_mac: Datapath peer mac address |
| 2981 | * @vdev_id: id of atapath vdev |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 2982 | * @vdev: Datapath vdev |
| 2983 | * @pdev - data path device instance |
| 2984 | * @sec_type - security type |
| 2985 | * @rx_pn - Receive pn starting number |
| 2986 | * |
| 2987 | */ |
| 2988 | |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2989 | QDF_STATUS |
| 2990 | dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, |
| 2991 | uint8_t *peer_mac, enum cdp_sec_type sec_type, |
| 2992 | uint32_t *rx_pn) |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 2993 | { |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 2994 | struct dp_pdev *pdev; |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 2995 | int i; |
sumedh baikady | e3947bd | 2017-11-29 19:19:25 -0800 | [diff] [blame] | 2996 | uint8_t pn_size; |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 2997 | struct hal_reo_cmd_params params; |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 2998 | QDF_STATUS status = QDF_STATUS_SUCCESS; |
| 2999 | struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, |
| 3000 | peer_mac, 0, vdev_id); |
| 3001 | struct dp_vdev *vdev = |
| 3002 | dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, |
| 3003 | vdev_id); |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3004 | |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 3005 | if (!vdev || !peer || peer->delete_in_progress) { |
| 3006 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, |
| 3007 | "%s: Peer is NULL!\n", __func__); |
| 3008 | status = QDF_STATUS_E_FAILURE; |
| 3009 | goto fail; |
| 3010 | } |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3011 | |
| 3012 | pdev = vdev->pdev; |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3013 | qdf_mem_zero(¶ms, sizeof(params)); |
| 3014 | |
| 3015 | params.std.need_status = 1; |
| 3016 | params.u.upd_queue_params.update_pn_valid = 1; |
| 3017 | params.u.upd_queue_params.update_pn_size = 1; |
| 3018 | params.u.upd_queue_params.update_pn = 1; |
| 3019 | params.u.upd_queue_params.update_pn_check_needed = 1; |
Gurumoorthi Gnanasambandhan | d733cd7 | 2018-06-12 17:05:52 +0530 | [diff] [blame] | 3020 | params.u.upd_queue_params.update_svld = 1; |
| 3021 | params.u.upd_queue_params.svld = 0; |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3022 | |
| 3023 | peer->security[dp_sec_ucast].sec_type = sec_type; |
| 3024 | |
| 3025 | switch (sec_type) { |
| 3026 | case cdp_sec_type_tkip_nomic: |
| 3027 | case cdp_sec_type_aes_ccmp: |
| 3028 | case cdp_sec_type_aes_ccmp_256: |
| 3029 | case cdp_sec_type_aes_gcmp: |
| 3030 | case cdp_sec_type_aes_gcmp_256: |
| 3031 | params.u.upd_queue_params.pn_check_needed = 1; |
| 3032 | params.u.upd_queue_params.pn_size = 48; |
sumedh baikady | e3947bd | 2017-11-29 19:19:25 -0800 | [diff] [blame] | 3033 | pn_size = 48; |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3034 | break; |
| 3035 | case cdp_sec_type_wapi: |
| 3036 | params.u.upd_queue_params.pn_check_needed = 1; |
| 3037 | params.u.upd_queue_params.pn_size = 128; |
sumedh baikady | e3947bd | 2017-11-29 19:19:25 -0800 | [diff] [blame] | 3038 | pn_size = 128; |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3039 | if (vdev->opmode == wlan_op_mode_ap) { |
| 3040 | params.u.upd_queue_params.pn_even = 1; |
| 3041 | params.u.upd_queue_params.update_pn_even = 1; |
| 3042 | } else { |
| 3043 | params.u.upd_queue_params.pn_uneven = 1; |
| 3044 | params.u.upd_queue_params.update_pn_uneven = 1; |
| 3045 | } |
| 3046 | break; |
| 3047 | default: |
| 3048 | params.u.upd_queue_params.pn_check_needed = 0; |
sumedh baikady | e3947bd | 2017-11-29 19:19:25 -0800 | [diff] [blame] | 3049 | pn_size = 0; |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3050 | break; |
| 3051 | } |
| 3052 | |
| 3053 | |
| 3054 | for (i = 0; i < DP_MAX_TIDS; i++) { |
| 3055 | struct dp_rx_tid *rx_tid = &peer->rx_tid[i]; |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 3056 | qdf_spin_lock_bh(&rx_tid->tid_lock); |
Jeff Johnson | a8edf33 | 2019-03-18 09:51:52 -0700 | [diff] [blame] | 3057 | if (rx_tid->hw_qdesc_vaddr_unaligned) { |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3058 | params.std.addr_lo = |
| 3059 | rx_tid->hw_qdesc_paddr & 0xffffffff; |
| 3060 | params.std.addr_hi = |
| 3061 | (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; |
| 3062 | |
Krunal Soni | d3eb8bc | 2018-11-12 19:06:15 -0800 | [diff] [blame] | 3063 | if (pn_size) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 3064 | QDF_TRACE(QDF_MODULE_ID_DP, |
Krunal Soni | d3eb8bc | 2018-11-12 19:06:15 -0800 | [diff] [blame] | 3065 | QDF_TRACE_LEVEL_INFO_HIGH, |
| 3066 | "%s PN set for TID:%d pn:%x:%x:%x:%x", |
| 3067 | __func__, i, rx_pn[3], rx_pn[2], |
| 3068 | rx_pn[1], rx_pn[0]); |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3069 | params.u.upd_queue_params.update_pn_valid = 1; |
| 3070 | params.u.upd_queue_params.pn_31_0 = rx_pn[0]; |
| 3071 | params.u.upd_queue_params.pn_63_32 = rx_pn[1]; |
| 3072 | params.u.upd_queue_params.pn_95_64 = rx_pn[2]; |
| 3073 | params.u.upd_queue_params.pn_127_96 = rx_pn[3]; |
| 3074 | } |
sumedh baikady | e3947bd | 2017-11-29 19:19:25 -0800 | [diff] [blame] | 3075 | rx_tid->pn_size = pn_size; |
Rakesh Pillai | ae0f601 | 2020-01-02 11:03:09 +0530 | [diff] [blame] | 3076 | if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc), |
| 3077 | CMD_UPDATE_RX_REO_QUEUE, |
| 3078 | ¶ms, dp_rx_tid_update_cb, |
| 3079 | rx_tid)) { |
| 3080 | dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE" |
| 3081 | "tid %d desc %pK", rx_tid->tid, |
| 3082 | (void *)(rx_tid->hw_qdesc_paddr)); |
| 3083 | DP_STATS_INC(cdp_soc_t_to_dp_soc(soc), |
| 3084 | rx.err.reo_cmd_send_fail, 1); |
| 3085 | } |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3086 | } else { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 3087 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, |
| 3088 | "PN Check not setup for TID :%d ", i); |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3089 | } |
Sumedh Baikady | 1c61e06 | 2018-02-12 22:25:47 -0800 | [diff] [blame] | 3090 | qdf_spin_unlock_bh(&rx_tid->tid_lock); |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3091 | } |
Pavankumar Nandeshwar | a234716 | 2019-12-18 23:20:31 +0530 | [diff] [blame] | 3092 | fail: |
| 3093 | if (peer) |
| 3094 | dp_peer_unref_delete(peer); |
| 3095 | |
| 3096 | return status; |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 3097 | } |
| 3098 | |
| 3099 | |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 3100 | void |
Akshay Kosigi | 8a75314 | 2019-06-27 14:17:08 +0530 | [diff] [blame] | 3101 | dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id, |
| 3102 | enum cdp_sec_type sec_type, int is_unicast, |
| 3103 | u_int32_t *michael_key, |
| 3104 | u_int32_t *rx_pn) |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 3105 | { |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 3106 | struct dp_peer *peer; |
| 3107 | int sec_index; |
| 3108 | |
| 3109 | peer = dp_peer_find_by_id(soc, peer_id); |
| 3110 | if (!peer) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 3111 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 3112 | "Couldn't find peer from ID %d - skipping security inits", |
| 3113 | peer_id); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 3114 | return; |
| 3115 | } |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 3116 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, |
Shivani Soni | a5707a4 | 2020-01-08 16:42:08 +0530 | [diff] [blame] | 3117 | "sec spec for peer %pK %pM: %s key of type %d", |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 3118 | peer, |
Shivani Soni | a5707a4 | 2020-01-08 16:42:08 +0530 | [diff] [blame] | 3119 | peer->mac_addr.raw, |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 3120 | is_unicast ? "ucast" : "mcast", |
| 3121 | sec_type); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 3122 | sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast; |
| 3123 | peer->security[sec_index].sec_type = sec_type; |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3124 | #ifdef notyet /* TODO: See if this is required for defrag support */ |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 3125 | /* michael key only valid for TKIP, but for simplicity, |
| 3126 | * copy it anyway |
| 3127 | */ |
| 3128 | qdf_mem_copy( |
| 3129 | &peer->security[sec_index].michael_key[0], |
| 3130 | michael_key, |
| 3131 | sizeof(peer->security[sec_index].michael_key)); |
| 3132 | #ifdef BIG_ENDIAN_HOST |
| 3133 | OL_IF_SWAPBO(peer->security[sec_index].michael_key[0], |
| 3134 | sizeof(peer->security[sec_index].michael_key)); |
| 3135 | #endif /* BIG_ENDIAN_HOST */ |
| 3136 | #endif |
| 3137 | |
| 3138 | #ifdef notyet /* TODO: Check if this is required for wifi3.0 */ |
Venkata Sharath Chandra Manchala | d18887e | 2018-10-02 18:18:52 -0700 | [diff] [blame] | 3139 | if (sec_type != cdp_sec_type_wapi) { |
hangtian | fe681a5 | 2019-01-16 17:16:28 +0800 | [diff] [blame] | 3140 | qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 3141 | } else { |
| 3142 | for (i = 0; i < DP_MAX_TIDS; i++) { |
| 3143 | /* |
| 3144 | * Setting PN valid bit for WAPI sec_type, |
| 3145 | * since WAPI PN has to be started with predefined value |
| 3146 | */ |
| 3147 | peer->tids_last_pn_valid[i] = 1; |
| 3148 | qdf_mem_copy( |
| 3149 | (u_int8_t *) &peer->tids_last_pn[i], |
| 3150 | (u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t)); |
| 3151 | peer->tids_last_pn[i].pn128[1] = |
| 3152 | qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]); |
| 3153 | peer->tids_last_pn[i].pn128[0] = |
| 3154 | qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]); |
| 3155 | } |
| 3156 | } |
| 3157 | #endif |
| 3158 | /* TODO: Update HW TID queue with PN check parameters (pn type for |
| 3159 | * all security types and last pn for WAPI) once REO command API |
| 3160 | * is available |
| 3161 | */ |
Sravan Kumar Kairam | 26d471e | 2018-08-14 23:51:58 +0530 | [diff] [blame] | 3162 | |
| 3163 | dp_peer_unref_del_find_by_id(peer); |
Karunakar Dasineni | 9b814ce | 2016-09-01 15:00:09 -0700 | [diff] [blame] | 3164 | } |
| 3165 | |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 3166 | #ifdef DP_PEER_EXTENDED_API |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3167 | QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 3168 | struct ol_txrx_desc_type *sta_desc) |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3169 | { |
| 3170 | struct dp_peer *peer; |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3171 | struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); |
| 3172 | struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3173 | |
Rakshith Suresh Patkar | 9e02e1e | 2019-07-26 11:25:29 +0530 | [diff] [blame] | 3174 | peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, |
Yeshwanth Sriram Guntuka | 65d5477 | 2019-11-22 14:50:02 +0530 | [diff] [blame] | 3175 | sta_desc->peer_addr.bytes); |
Rakshith Suresh Patkar | 9e02e1e | 2019-07-26 11:25:29 +0530 | [diff] [blame] | 3176 | |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3177 | if (!pdev) |
| 3178 | return QDF_STATUS_E_FAULT; |
| 3179 | |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3180 | if (!peer) |
| 3181 | return QDF_STATUS_E_FAULT; |
| 3182 | |
| 3183 | qdf_spin_lock_bh(&peer->peer_info_lock); |
| 3184 | peer->state = OL_TXRX_PEER_STATE_CONN; |
| 3185 | qdf_spin_unlock_bh(&peer->peer_info_lock); |
| 3186 | |
Sravan Kumar Kairam | ebd627e | 2018-08-28 23:32:52 +0530 | [diff] [blame] | 3187 | dp_rx_flush_rx_cached(peer, false); |
| 3188 | |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3189 | return QDF_STATUS_SUCCESS; |
| 3190 | } |
| 3191 | |
Rakshith Suresh Patkar | 0375108 | 2019-07-26 12:30:23 +0530 | [diff] [blame] | 3192 | QDF_STATUS |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3193 | dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 3194 | struct qdf_mac_addr peer_addr) |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3195 | { |
| 3196 | struct dp_peer *peer; |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3197 | struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); |
| 3198 | struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); |
| 3199 | |
| 3200 | if (!pdev) |
| 3201 | return QDF_STATUS_E_FAULT; |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3202 | |
Yeshwanth Sriram Guntuka | 65d5477 | 2019-11-22 14:50:02 +0530 | [diff] [blame] | 3203 | peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes); |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3204 | if (!peer) |
| 3205 | return QDF_STATUS_E_FAULT; |
| 3206 | |
| 3207 | qdf_spin_lock_bh(&peer->peer_info_lock); |
| 3208 | peer->state = OL_TXRX_PEER_STATE_DISC; |
| 3209 | qdf_spin_unlock_bh(&peer->peer_info_lock); |
| 3210 | |
Sravan Kumar Kairam | ebd627e | 2018-08-28 23:32:52 +0530 | [diff] [blame] | 3211 | dp_rx_flush_rx_cached(peer, true); |
| 3212 | |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3213 | return QDF_STATUS_SUCCESS; |
| 3214 | } |
| 3215 | |
| 3216 | /** |
| 3217 | * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev |
| 3218 | * @pdev - data path device instance |
| 3219 | * @vdev - virtual interface instance |
| 3220 | * @peer_addr - peer mac address |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3221 | * |
| 3222 | * Find peer by peer mac address within vdev |
| 3223 | * |
| 3224 | * Return: peer instance void pointer |
| 3225 | * NULL cannot find target peer |
| 3226 | */ |
Venkata Sharath Chandra Manchala | f2a125a | 2016-11-28 18:10:11 -0800 | [diff] [blame] | 3227 | void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle, |
| 3228 | struct cdp_vdev *vdev_handle, |
Yeshwanth Sriram Guntuka | 65d5477 | 2019-11-22 14:50:02 +0530 | [diff] [blame] | 3229 | uint8_t *peer_addr) |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3230 | { |
Venkata Sharath Chandra Manchala | f2a125a | 2016-11-28 18:10:11 -0800 | [diff] [blame] | 3231 | struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; |
| 3232 | struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3233 | struct dp_peer *peer; |
| 3234 | |
Chaithanya Garrepalli | 3583cfb | 2019-12-20 17:40:53 +0530 | [diff] [blame] | 3235 | peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, DP_VDEV_ALL); |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3236 | |
| 3237 | if (!peer) |
| 3238 | return NULL; |
| 3239 | |
Krunal Soni | 304792a | 2018-06-28 14:18:30 -0700 | [diff] [blame] | 3240 | if (peer->vdev != vdev) { |
Chaithanya Garrepalli | 7c8cf12 | 2018-09-07 19:23:52 +0530 | [diff] [blame] | 3241 | dp_peer_unref_delete(peer); |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3242 | return NULL; |
Krunal Soni | 304792a | 2018-06-28 14:18:30 -0700 | [diff] [blame] | 3243 | } |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3244 | |
Krishna Kumaar Natarajan | 71e5b83 | 2017-01-26 08:04:13 -0800 | [diff] [blame] | 3245 | /* ref_cnt is incremented inside dp_peer_find_hash_find(). |
| 3246 | * Decrement it here. |
| 3247 | */ |
Chaithanya Garrepalli | 7c8cf12 | 2018-09-07 19:23:52 +0530 | [diff] [blame] | 3248 | dp_peer_unref_delete(peer); |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3249 | |
| 3250 | return peer; |
| 3251 | } |
| 3252 | |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3253 | QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, |
| 3254 | enum ol_txrx_peer_state state) |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3255 | { |
| 3256 | struct dp_peer *peer; |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3257 | struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3258 | |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3259 | peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL); |
Jeff Johnson | a8edf33 | 2019-03-18 09:51:52 -0700 | [diff] [blame] | 3260 | if (!peer) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 3261 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 3262 | "Failed to find peer for: [%pM]", peer_mac); |
Ankit Gupta | 6fb389b | 2017-01-03 12:23:45 -0800 | [diff] [blame] | 3263 | return QDF_STATUS_E_FAILURE; |
| 3264 | } |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3265 | peer->state = state; |
Krishna Kumaar Natarajan | 71e5b83 | 2017-01-26 08:04:13 -0800 | [diff] [blame] | 3266 | |
Amruta Kulkarni | 0f0a36c | 2020-01-03 15:09:57 -0800 | [diff] [blame] | 3267 | dp_info("peer %pK state %d", peer, peer->state); |
Krishna Kumaar Natarajan | 71e5b83 | 2017-01-26 08:04:13 -0800 | [diff] [blame] | 3268 | /* ref_cnt is incremented inside dp_peer_find_hash_find(). |
| 3269 | * Decrement it here. |
| 3270 | */ |
Chaithanya Garrepalli | 7c8cf12 | 2018-09-07 19:23:52 +0530 | [diff] [blame] | 3271 | dp_peer_unref_delete(peer); |
Krishna Kumaar Natarajan | 71e5b83 | 2017-01-26 08:04:13 -0800 | [diff] [blame] | 3272 | |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3273 | return QDF_STATUS_SUCCESS; |
| 3274 | } |
| 3275 | |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3276 | QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, |
| 3277 | uint8_t *vdev_id) |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3278 | { |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3279 | struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); |
| 3280 | struct dp_peer *peer = |
| 3281 | dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL); |
| 3282 | |
| 3283 | if (!peer) |
| 3284 | return QDF_STATUS_E_FAILURE; |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3285 | |
Jinwei Chen | b02de7e | 2019-09-10 17:21:14 +0800 | [diff] [blame] | 3286 | dp_info("peer %pK vdev %pK vdev id %d", |
| 3287 | peer, peer->vdev, peer->vdev->vdev_id); |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3288 | *vdev_id = peer->vdev->vdev_id; |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3289 | /* ref_cnt is incremented inside dp_peer_find_hash_find(). |
| 3290 | * Decrement it here. |
| 3291 | */ |
| 3292 | dp_peer_unref_delete(peer); |
| 3293 | |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3294 | return QDF_STATUS_SUCCESS; |
| 3295 | } |
| 3296 | |
Rakshith Suresh Patkar | fb42ec3 | 2019-07-26 13:52:00 +0530 | [diff] [blame] | 3297 | struct cdp_vdev * |
| 3298 | dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle, |
| 3299 | struct qdf_mac_addr peer_addr) |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 3300 | { |
Yun Park | 601d0d8 | 2017-08-28 21:49:31 -0700 | [diff] [blame] | 3301 | struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 3302 | struct dp_peer *peer = NULL; |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 3303 | |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 3304 | if (!pdev) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 3305 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, |
Shivani Soni | a5707a4 | 2020-01-08 16:42:08 +0530 | [diff] [blame] | 3306 | "PDEV not found for peer_addr: %pM", |
| 3307 | peer_addr.bytes); |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 3308 | return NULL; |
| 3309 | } |
| 3310 | |
Yeshwanth Sriram Guntuka | 65d5477 | 2019-11-22 14:50:02 +0530 | [diff] [blame] | 3311 | peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes); |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 3312 | if (!peer) { |
Rakshith Suresh Patkar | fb42ec3 | 2019-07-26 13:52:00 +0530 | [diff] [blame] | 3313 | QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, |
Shivani Soni | a5707a4 | 2020-01-08 16:42:08 +0530 | [diff] [blame] | 3314 | "PDEV not found for peer_addr: %pM", |
| 3315 | peer_addr.bytes); |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 3316 | return NULL; |
| 3317 | } |
| 3318 | |
| 3319 | return (struct cdp_vdev *)peer->vdev; |
| 3320 | } |
| 3321 | |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3322 | /** |
| 3323 | * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs |
| 3324 | * @peer - peer instance |
| 3325 | * |
| 3326 | * Get virtual interface instance which peer belongs |
| 3327 | * |
| 3328 | * Return: virtual interface instance pointer |
| 3329 | * NULL in case cannot find |
| 3330 | */ |
Venkata Sharath Chandra Manchala | f2a125a | 2016-11-28 18:10:11 -0800 | [diff] [blame] | 3331 | struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle) |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3332 | { |
| 3333 | struct dp_peer *peer = peer_handle; |
| 3334 | |
Mohit Khanna | 7ac554b | 2018-05-24 11:58:13 -0700 | [diff] [blame] | 3335 | DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev); |
Venkata Sharath Chandra Manchala | f2a125a | 2016-11-28 18:10:11 -0800 | [diff] [blame] | 3336 | return (struct cdp_vdev *)peer->vdev; |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3337 | } |
| 3338 | |
| 3339 | /** |
| 3340 | * dp_peer_get_peer_mac_addr() - Get peer mac address |
| 3341 | * @peer - peer instance |
| 3342 | * |
| 3343 | * Get peer mac address |
| 3344 | * |
| 3345 | * Return: peer mac address pointer |
| 3346 | * NULL in case cannot find |
| 3347 | */ |
| 3348 | uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle) |
| 3349 | { |
| 3350 | struct dp_peer *peer = peer_handle; |
| 3351 | uint8_t *mac; |
| 3352 | |
| 3353 | mac = peer->mac_addr.raw; |
Amruta Kulkarni | 0f0a36c | 2020-01-03 15:09:57 -0800 | [diff] [blame] | 3354 | dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x", |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3355 | peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); |
| 3356 | return peer->mac_addr.raw; |
| 3357 | } |
| 3358 | |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3359 | int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, |
| 3360 | uint8_t *peer_mac) |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3361 | { |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3362 | enum ol_txrx_peer_state peer_state; |
| 3363 | struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); |
| 3364 | struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0, |
| 3365 | vdev_id); |
| 3366 | |
| 3367 | if (!peer) |
| 3368 | return QDF_STATUS_E_FAILURE; |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3369 | |
Yun Park | 11d46e0 | 2017-11-27 10:51:53 -0800 | [diff] [blame] | 3370 | DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state); |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3371 | peer_state = peer->state; |
| 3372 | dp_peer_unref_delete(peer); |
| 3373 | |
| 3374 | return peer_state; |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3375 | } |
| 3376 | |
| 3377 | /** |
| 3378 | * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device |
| 3379 | * @pdev - data path device instance |
| 3380 | * |
| 3381 | * local peer id pool alloc for physical device |
| 3382 | * |
| 3383 | * Return: none |
| 3384 | */ |
| 3385 | void dp_local_peer_id_pool_init(struct dp_pdev *pdev) |
| 3386 | { |
| 3387 | int i; |
| 3388 | |
| 3389 | /* point the freelist to the first ID */ |
| 3390 | pdev->local_peer_ids.freelist = 0; |
| 3391 | |
| 3392 | /* link each ID to the next one */ |
| 3393 | for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) { |
| 3394 | pdev->local_peer_ids.pool[i] = i + 1; |
| 3395 | pdev->local_peer_ids.map[i] = NULL; |
| 3396 | } |
| 3397 | |
| 3398 | /* link the last ID to itself, to mark the end of the list */ |
| 3399 | i = OL_TXRX_NUM_LOCAL_PEER_IDS; |
| 3400 | pdev->local_peer_ids.pool[i] = i; |
| 3401 | |
| 3402 | qdf_spinlock_create(&pdev->local_peer_ids.lock); |
| 3403 | DP_TRACE(INFO, "Peer pool init"); |
| 3404 | } |
| 3405 | |
| 3406 | /** |
| 3407 | * dp_local_peer_id_alloc() - allocate local peer id |
| 3408 | * @pdev - data path device instance |
| 3409 | * @peer - new peer instance |
| 3410 | * |
| 3411 | * allocate local peer id |
| 3412 | * |
| 3413 | * Return: none |
| 3414 | */ |
| 3415 | void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) |
| 3416 | { |
| 3417 | int i; |
| 3418 | |
| 3419 | qdf_spin_lock_bh(&pdev->local_peer_ids.lock); |
| 3420 | i = pdev->local_peer_ids.freelist; |
| 3421 | if (pdev->local_peer_ids.pool[i] == i) { |
| 3422 | /* the list is empty, except for the list-end marker */ |
| 3423 | peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID; |
| 3424 | } else { |
| 3425 | /* take the head ID and advance the freelist */ |
| 3426 | peer->local_id = i; |
| 3427 | pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i]; |
| 3428 | pdev->local_peer_ids.map[i] = peer; |
| 3429 | } |
| 3430 | qdf_spin_unlock_bh(&pdev->local_peer_ids.lock); |
Amruta Kulkarni | 0f0a36c | 2020-01-03 15:09:57 -0800 | [diff] [blame] | 3431 | dp_info("peer %pK, local id %d", peer, peer->local_id); |
Leo Chang | 5ea93a4 | 2016-11-03 12:39:49 -0700 | [diff] [blame] | 3432 | } |
| 3433 | |
| 3434 | /** |
| 3435 | * dp_local_peer_id_free() - remove local peer id |
| 3436 | * @pdev - data path device instance |
| 3437 | * @peer - peer instance should be removed |
| 3438 | * |
| 3439 | * remove local peer id |
| 3440 | * |
| 3441 | * Return: none |
| 3442 | */ |
| 3443 | void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) |
| 3444 | { |
| 3445 | int i = peer->local_id; |
| 3446 | if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) || |
| 3447 | (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) { |
| 3448 | return; |
| 3449 | } |
| 3450 | |
| 3451 | /* put this ID on the head of the freelist */ |
| 3452 | qdf_spin_lock_bh(&pdev->local_peer_ids.lock); |
| 3453 | pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist; |
| 3454 | pdev->local_peer_ids.freelist = i; |
| 3455 | pdev->local_peer_ids.map[i] = NULL; |
| 3456 | qdf_spin_unlock_bh(&pdev->local_peer_ids.lock); |
| 3457 | } |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3458 | |
| 3459 | bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, |
| 3460 | uint8_t vdev_id, uint8_t *peer_addr) |
| 3461 | { |
| 3462 | struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); |
| 3463 | struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); |
| 3464 | |
| 3465 | if (!vdev) |
| 3466 | return false; |
| 3467 | |
| 3468 | return !!dp_find_peer_by_addr_and_vdev( |
| 3469 | dp_pdev_to_cdp_pdev(vdev->pdev), |
| 3470 | dp_vdev_to_cdp_vdev(vdev), |
| 3471 | peer_addr); |
| 3472 | } |
| 3473 | |
| 3474 | bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl, |
| 3475 | uint8_t vdev_id, uint8_t *peer_addr, |
| 3476 | uint16_t max_bssid) |
| 3477 | { |
| 3478 | int i; |
| 3479 | struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); |
| 3480 | struct dp_vdev *vdev; |
| 3481 | |
| 3482 | for (i = 0; i < max_bssid; i++) { |
Ananya Gupta | f489773 | 2020-02-25 19:38:01 +0530 | [diff] [blame] | 3483 | vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, i); |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3484 | /* Need to check vdevs other than the vdev_id */ |
| 3485 | if (vdev_id == i || !vdev) |
| 3486 | continue; |
| 3487 | if (dp_find_peer_by_addr_and_vdev( |
| 3488 | dp_pdev_to_cdp_pdev(vdev->pdev), |
| 3489 | dp_vdev_to_cdp_vdev(vdev), |
| 3490 | peer_addr)) { |
Ananya Gupta | f489773 | 2020-02-25 19:38:01 +0530 | [diff] [blame] | 3491 | dp_err("%s: Duplicate peer %pM already exist on vdev %d", |
| 3492 | __func__, peer_addr, i); |
Vevek Venkatesan | af77698 | 2019-09-12 03:43:08 +0530 | [diff] [blame] | 3493 | return true; |
| 3494 | } |
| 3495 | } |
| 3496 | |
| 3497 | return false; |
| 3498 | } |
| 3499 | |
| 3500 | bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 3501 | uint8_t *peer_addr) |
| 3502 | { |
| 3503 | struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); |
| 3504 | struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); |
| 3505 | |
| 3506 | if (!pdev) |
| 3507 | return false; |
| 3508 | |
| 3509 | return !!dp_find_peer_by_addr(dp_pdev_to_cdp_pdev(pdev), peer_addr); |
| 3510 | } |
Nandha Kishore Easwaran | fd7832e | 2016-11-20 18:22:48 +0530 | [diff] [blame] | 3511 | #endif |
Ishank Jain | 1e7401c | 2017-02-17 15:38:39 +0530 | [diff] [blame] | 3512 | |
| 3513 | /** |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 3514 | * dp_peer_rxtid_stats: Retried Rx TID (REO queue) stats from HW |
| 3515 | * @peer: DP peer handle |
Pratik Gandhi | 51b6b6d | 2017-09-18 15:02:43 +0530 | [diff] [blame] | 3516 | * @dp_stats_cmd_cb: REO command callback function |
| 3517 | * @cb_ctxt: Callback context |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 3518 | * |
Sravan Goud | 0bbce75 | 2020-02-11 18:07:03 +0530 | [diff] [blame] | 3519 | * Return: count of tid stats cmd send succeeded |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 3520 | */ |
Sravan Goud | 0bbce75 | 2020-02-11 18:07:03 +0530 | [diff] [blame] | 3521 | int dp_peer_rxtid_stats(struct dp_peer *peer, |
| 3522 | dp_rxtid_stats_cmd_cb dp_stats_cmd_cb, |
Pratik Gandhi | 51b6b6d | 2017-09-18 15:02:43 +0530 | [diff] [blame] | 3523 | void *cb_ctxt) |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 3524 | { |
| 3525 | struct dp_soc *soc = peer->vdev->pdev->soc; |
| 3526 | struct hal_reo_cmd_params params; |
| 3527 | int i; |
Sravan Goud | 0bbce75 | 2020-02-11 18:07:03 +0530 | [diff] [blame] | 3528 | int stats_cmd_sent_cnt = 0; |
| 3529 | QDF_STATUS status; |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 3530 | |
Pratik Gandhi | 51b6b6d | 2017-09-18 15:02:43 +0530 | [diff] [blame] | 3531 | if (!dp_stats_cmd_cb) |
Sravan Goud | 0bbce75 | 2020-02-11 18:07:03 +0530 | [diff] [blame] | 3532 | return stats_cmd_sent_cnt; |
Pratik Gandhi | 51b6b6d | 2017-09-18 15:02:43 +0530 | [diff] [blame] | 3533 | |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 3534 | qdf_mem_zero(¶ms, sizeof(params)); |
| 3535 | for (i = 0; i < DP_MAX_TIDS; i++) { |
| 3536 | struct dp_rx_tid *rx_tid = &peer->rx_tid[i]; |
Jeff Johnson | a8edf33 | 2019-03-18 09:51:52 -0700 | [diff] [blame] | 3537 | if (rx_tid->hw_qdesc_vaddr_unaligned) { |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 3538 | params.std.need_status = 1; |
| 3539 | params.std.addr_lo = |
| 3540 | rx_tid->hw_qdesc_paddr & 0xffffffff; |
| 3541 | params.std.addr_hi = |
| 3542 | (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; |
Pratik Gandhi | 51b6b6d | 2017-09-18 15:02:43 +0530 | [diff] [blame] | 3543 | |
| 3544 | if (cb_ctxt) { |
Sravan Goud | 0bbce75 | 2020-02-11 18:07:03 +0530 | [diff] [blame] | 3545 | status = dp_reo_send_cmd( |
| 3546 | soc, CMD_GET_QUEUE_STATS, |
| 3547 | ¶ms, dp_stats_cmd_cb, |
| 3548 | cb_ctxt); |
Pratik Gandhi | 51b6b6d | 2017-09-18 15:02:43 +0530 | [diff] [blame] | 3549 | } else { |
Sravan Goud | 0bbce75 | 2020-02-11 18:07:03 +0530 | [diff] [blame] | 3550 | status = dp_reo_send_cmd( |
| 3551 | soc, CMD_GET_QUEUE_STATS, |
| 3552 | ¶ms, dp_stats_cmd_cb, |
| 3553 | rx_tid); |
Pratik Gandhi | 51b6b6d | 2017-09-18 15:02:43 +0530 | [diff] [blame] | 3554 | } |
Karunakar Dasineni | 3da0811 | 2017-06-15 14:42:39 -0700 | [diff] [blame] | 3555 | |
Sravan Goud | 0bbce75 | 2020-02-11 18:07:03 +0530 | [diff] [blame] | 3556 | if (QDF_IS_STATUS_SUCCESS(status)) |
| 3557 | stats_cmd_sent_cnt++; |
| 3558 | |
Karunakar Dasineni | 3da0811 | 2017-06-15 14:42:39 -0700 | [diff] [blame] | 3559 | /* Flush REO descriptor from HW cache to update stats |
| 3560 | * in descriptor memory. This is to help debugging */ |
| 3561 | qdf_mem_zero(¶ms, sizeof(params)); |
| 3562 | params.std.need_status = 0; |
| 3563 | params.std.addr_lo = |
| 3564 | rx_tid->hw_qdesc_paddr & 0xffffffff; |
| 3565 | params.std.addr_hi = |
| 3566 | (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; |
Karunakar Dasineni | 25f1b04 | 2018-02-15 23:26:17 -0800 | [diff] [blame] | 3567 | params.u.fl_cache_params.flush_no_inval = 1; |
Karunakar Dasineni | 3da0811 | 2017-06-15 14:42:39 -0700 | [diff] [blame] | 3568 | dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, ¶ms, NULL, |
| 3569 | NULL); |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 3570 | } |
| 3571 | } |
Sravan Goud | 0bbce75 | 2020-02-11 18:07:03 +0530 | [diff] [blame] | 3572 | |
| 3573 | return stats_cmd_sent_cnt; |
Karunakar Dasineni | 93f633c | 2017-06-02 19:04:46 -0700 | [diff] [blame] | 3574 | } |
Chaithanya Garrepalli | 0323f80 | 2018-03-14 17:45:21 +0530 | [diff] [blame] | 3575 | |
Pavankumar Nandeshwar | 6c83405 | 2020-01-06 20:20:31 +0530 | [diff] [blame] | 3576 | QDF_STATUS |
| 3577 | dp_set_michael_key(struct cdp_soc_t *soc, |
| 3578 | uint8_t vdev_id, |
| 3579 | uint8_t *peer_mac, |
| 3580 | bool is_unicast, uint32_t *key) |
Pramod Simha | 6e10cb2 | 2018-06-20 12:05:44 -0700 | [diff] [blame] | 3581 | { |
Pavankumar Nandeshwar | 6c83405 | 2020-01-06 20:20:31 +0530 | [diff] [blame] | 3582 | QDF_STATUS status = QDF_STATUS_SUCCESS; |
Pramod Simha | 6e10cb2 | 2018-06-20 12:05:44 -0700 | [diff] [blame] | 3583 | uint8_t sec_index = is_unicast ? 1 : 0; |
Pavankumar Nandeshwar | 6c83405 | 2020-01-06 20:20:31 +0530 | [diff] [blame] | 3584 | struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, |
| 3585 | peer_mac, 0, vdev_id); |
Pramod Simha | 6e10cb2 | 2018-06-20 12:05:44 -0700 | [diff] [blame] | 3586 | |
Pavankumar Nandeshwar | 6c83405 | 2020-01-06 20:20:31 +0530 | [diff] [blame] | 3587 | if (!peer || peer->delete_in_progress) { |
Chaitanya Kiran Godavarthi | e0b3414 | 2019-01-16 17:05:15 +0530 | [diff] [blame] | 3588 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
Pramod Simha | 6e10cb2 | 2018-06-20 12:05:44 -0700 | [diff] [blame] | 3589 | "peer not found "); |
Pavankumar Nandeshwar | 6c83405 | 2020-01-06 20:20:31 +0530 | [diff] [blame] | 3590 | status = QDF_STATUS_E_FAILURE; |
| 3591 | goto fail; |
Pramod Simha | 6e10cb2 | 2018-06-20 12:05:44 -0700 | [diff] [blame] | 3592 | } |
| 3593 | |
| 3594 | qdf_mem_copy(&peer->security[sec_index].michael_key[0], |
| 3595 | key, IEEE80211_WEP_MICLEN); |
Pavankumar Nandeshwar | 6c83405 | 2020-01-06 20:20:31 +0530 | [diff] [blame] | 3596 | |
| 3597 | fail: |
| 3598 | if (peer) |
| 3599 | dp_peer_unref_delete(peer); |
| 3600 | |
| 3601 | return status; |
Pramod Simha | 6e10cb2 | 2018-06-20 12:05:44 -0700 | [diff] [blame] | 3602 | } |
Sravan Kumar Kairam | 26d471e | 2018-08-14 23:51:58 +0530 | [diff] [blame] | 3603 | |
| 3604 | bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id) |
| 3605 | { |
| 3606 | struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id); |
| 3607 | |
| 3608 | if (peer) { |
| 3609 | /* |
| 3610 | * Decrement the peer ref which is taken as part of |
| 3611 | * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled |
| 3612 | */ |
| 3613 | dp_peer_unref_del_find_by_id(peer); |
| 3614 | |
| 3615 | return true; |
| 3616 | } |
| 3617 | |
| 3618 | return false; |
| 3619 | } |