/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>              /* uint32_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc, etc. */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */
/* header files for utilities */
#include <cds_queue.h>          /* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>             /* ol_cfg_max_peer_id */

/* header files for our internal definitions */
#include <ol_txrx_api.h>        /* ol_txrx_pdev_t, etc. */
#include <ol_txrx_dbg.h>        /* TXRX_DEBUG_LEVEL */
#include <ol_txrx_internal.h>   /* ol_txrx_pdev_t, etc. */
#include <ol_txrx.h>            /* ol_txrx_peer_release_ref */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
#include <ol_tx_queue.h>
#include "wlan_roam_debug.h"

/*=== misc. / utility function definitions ==================================*/

static int ol_txrx_log2_ceil(unsigned int value)
{
        /* need to switch to unsigned math so that negative values
         * will right-shift towards 0 instead of -1
         */
        unsigned int tmp = value;
        int log2 = -1;

        if (value == 0) {
                TXRX_ASSERT2(0);
                return 0;
        }

        while (tmp) {
                log2++;
                tmp >>= 1;
        }
        if (1U << log2 != value)
                log2++;

        return log2;
}
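
/*
 * Illustrative behaviour (derived from the function above, for reference
 * only): ol_txrx_log2_ceil() returns the smallest n such that
 * (1U << n) >= value, e.g.
 *	log2_ceil(1) == 0, log2_ceil(2) == 1, log2_ceil(3) == 2,
 *	log2_ceil(8) == 3, log2_ceil(9) == 4.
 */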

int ol_txrx_peer_get_ref(struct ol_txrx_peer_t *peer,
                         enum peer_debug_id_type dbg_id)
{
        int refs_dbg_id;

        if (!peer) {
                ol_txrx_err("peer is null for ID %d", dbg_id);
                return -EINVAL;
        }

        if (dbg_id >= PEER_DEBUG_ID_MAX || dbg_id < 0) {
                ol_txrx_err("incorrect debug_id %d ", dbg_id);
                return -EINVAL;
        }

        qdf_atomic_inc(&peer->ref_cnt);
        qdf_atomic_inc(&peer->access_list[dbg_id]);
        refs_dbg_id = qdf_atomic_read(&peer->access_list[dbg_id]);

        return refs_dbg_id;
}

/*=== function definitions for peer MAC addr --> peer object hash table =====*/

/*
 * TXRX_PEER_HASH_LOAD_MULT / TXRX_PEER_HASH_LOAD_SHIFT:
 * Multiply by 2 and divide by 2^0 (shift by 0), then round up to a
 * power of two.
 * This provides at least twice as many bins in the peer hash table
 * as there will be entries.
 * Having substantially more bins than entries minimizes the probability of
 * having to compare MAC addresses.
 * Because the MAC address comparison is fairly efficient, it is okay if the
 * hash table is sparsely loaded, but it's generally better to use extra mem
 * to keep the table sparse, to keep the lookups as fast as possible.
 * An optimization would be to apply a more conservative loading factor for
 * high latency, where the lookup happens during the tx classification of
 * every tx frame, than for low-latency, where the lookup only happens
 * during association, when the PEER_MAP message is received.
 */
#define TXRX_PEER_HASH_LOAD_MULT 2
#define TXRX_PEER_HASH_LOAD_SHIFT 0

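/*
 * Illustrative sizing example (hypothetical numbers): if
 * ol_cfg_max_peer_id(ctrl_pdev) were 32, hash_elems would start as 33,
 * be scaled to 33 * TXRX_PEER_HASH_LOAD_MULT >> TXRX_PEER_HASH_LOAD_SHIFT
 * = 66, and ol_txrx_log2_ceil(66) == 7, so the table is rounded up to
 * 1 << 7 = 128 bins (mask = 0x7f, idx_bits = 7), i.e. roughly four bins
 * per peer.
 */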
static int ol_txrx_peer_find_hash_attach(struct ol_txrx_pdev_t *pdev)
{
        int i, hash_elems, log2;

        /* allocate the peer MAC address -> peer object hash table */
        hash_elems = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
        hash_elems *= TXRX_PEER_HASH_LOAD_MULT;
        hash_elems >>= TXRX_PEER_HASH_LOAD_SHIFT;
        log2 = ol_txrx_log2_ceil(hash_elems);
        hash_elems = 1 << log2;

        pdev->peer_hash.mask = hash_elems - 1;
        pdev->peer_hash.idx_bits = log2;
        /* allocate an array of TAILQ peer object lists */
        pdev->peer_hash.bins =
                qdf_mem_malloc(hash_elems *
                               sizeof(TAILQ_HEAD(anonymous_tail_q,
                                                 ol_txrx_peer_t)));
        if (!pdev->peer_hash.bins)
                return 1;       /* failure */

        for (i = 0; i < hash_elems; i++)
                TAILQ_INIT(&pdev->peer_hash.bins[i]);

        return 0;               /* success */
}

static void ol_txrx_peer_find_hash_detach(struct ol_txrx_pdev_t *pdev)
{
        qdf_mem_free(pdev->peer_hash.bins);
}

static inline unsigned int
ol_txrx_peer_find_hash_index(struct ol_txrx_pdev_t *pdev,
                             union ol_txrx_align_mac_addr_t *mac_addr)
{
        unsigned int index;

        index =
                mac_addr->align2.bytes_ab ^
                mac_addr->align2.bytes_cd ^ mac_addr->align2.bytes_ef;
        index ^= index >> pdev->peer_hash.idx_bits;
        index &= pdev->peer_hash.mask;
        return index;
}

void
ol_txrx_peer_find_hash_add(struct ol_txrx_pdev_t *pdev,
                           struct ol_txrx_peer_t *peer)
{
        unsigned int index;

        index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
        qdf_spin_lock_bh(&pdev->peer_ref_mutex);
        /*
         * It is important to add the new peer at the tail of the peer list
         * with the bin index. Together with having the hash_find function
         * search from head to tail, this ensures that if two entries with
         * the same MAC address are stored, the one added first will be
         * found first.
         */
        TAILQ_INSERT_TAIL(&pdev->peer_hash.bins[index], peer, hash_list_elem);
        qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
}

struct ol_txrx_peer_t *ol_txrx_peer_vdev_find_hash(struct ol_txrx_pdev_t *pdev,
                                                   struct ol_txrx_vdev_t *vdev,
                                                   uint8_t *peer_mac_addr,
                                                   int mac_addr_is_aligned,
                                                   uint8_t check_valid)
{
        union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
        unsigned int index;
        struct ol_txrx_peer_t *peer;

        if (mac_addr_is_aligned) {
                mac_addr = (union ol_txrx_align_mac_addr_t *)peer_mac_addr;
        } else {
                qdf_mem_copy(&local_mac_addr_aligned.raw[0],
                             peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
                mac_addr = &local_mac_addr_aligned;
        }
        index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
        qdf_spin_lock_bh(&pdev->peer_ref_mutex);
        TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
                if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
                    0 && (check_valid == 0 || peer->valid)
                    && peer->vdev == vdev) {
                        /* found it */
                        ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
                        qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
                        return peer;
                }
        }
        qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
        return NULL;            /* failure */
}

struct ol_txrx_peer_t *
ol_txrx_peer_find_hash_find_get_ref
        (struct ol_txrx_pdev_t *pdev,
         uint8_t *peer_mac_addr,
         int mac_addr_is_aligned,
         u8 check_valid,
         enum peer_debug_id_type dbg_id)
{
        union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
        unsigned int index;
        struct ol_txrx_peer_t *peer;

        if (mac_addr_is_aligned) {
                mac_addr = (union ol_txrx_align_mac_addr_t *)peer_mac_addr;
        } else {
                qdf_mem_copy(&local_mac_addr_aligned.raw[0],
                             peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
                mac_addr = &local_mac_addr_aligned;
        }
        index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
        qdf_spin_lock_bh(&pdev->peer_ref_mutex);
        TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
                if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
                    0 && (check_valid == 0 || peer->valid)) {
                        /* found it */
                        ol_txrx_peer_get_ref(peer, dbg_id);
                        qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
                        return peer;
                }
        }
        qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
        return NULL;            /* failure */
}
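
/*
 * Usage sketch (illustrative only, variable names are hypothetical): every
 * successful ol_txrx_peer_find_hash_find_get_ref() must be balanced by an
 * ol_txrx_peer_release_ref() with the same debug ID once the caller is done
 * with the peer, e.g.:
 *
 *	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, mac, 0, 1,
 *						    PEER_DEBUG_ID_OL_INTERNAL);
 *	if (peer) {
 *		... use peer ...
 *		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *	}
 */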

void
ol_txrx_peer_find_hash_remove(struct ol_txrx_pdev_t *pdev,
                              struct ol_txrx_peer_t *peer)
{
        unsigned int index;

        index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
        /*
         * DO NOT take the peer_ref_mutex lock here - it needs to be taken
         * by the caller.
         * The caller needs to hold the lock from the time the peer object's
         * reference count is decremented and tested up through the time the
         * reference to the peer object is removed from the hash table, by
         * this function.
         * Holding the lock only while removing the peer object reference
         * from the hash table keeps the hash table consistent, but does not
         * protect against a new HL tx context starting to use the peer object
         * if it looks up the peer object from its MAC address just after the
         * peer ref count is decremented to zero, but just before the peer
         * object reference is removed from the hash table.
         */
        /* qdf_spin_lock_bh(&pdev->peer_ref_mutex); */
        TAILQ_REMOVE(&pdev->peer_hash.bins[index], peer, hash_list_elem);
        /* qdf_spin_unlock_bh(&pdev->peer_ref_mutex); */
}
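
/*
 * Caller-side sketch of the locking contract described above (illustrative
 * only; the actual deletion path, e.g. ol_txrx_peer_release_ref, is expected
 * to follow this pattern):
 *
 *	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
 *	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
 *		ol_txrx_peer_find_hash_remove(pdev, peer);
 *		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 *		... free the peer object ...
 *	} else {
 *		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 *	}
 */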

void ol_txrx_peer_find_hash_erase(struct ol_txrx_pdev_t *pdev)
{
        unsigned int i;
        /*
         * Not really necessary to take peer_ref_mutex lock - by this point,
         * it's known that the pdev is no longer in use.
         */

        for (i = 0; i <= pdev->peer_hash.mask; i++) {
                if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
                        struct ol_txrx_peer_t *peer, *peer_next;

                        /*
                         * TAILQ_FOREACH_SAFE must be used here to avoid any
                         * memory access violation after peer is freed
                         */
                        TAILQ_FOREACH_SAFE(peer, &pdev->peer_hash.bins[i],
                                           hash_list_elem, peer_next) {
                                /*
                                 * Don't remove the peer from the hash table -
                                 * that would modify the list we are currently
                                 * traversing,
                                 * and it's not necessary anyway.
                                 */
                                /*
                                 * Artificially adjust the peer's ref count to
                                 * 1, so it will get deleted by
                                 * ol_txrx_peer_release_ref.
                                 */
                                qdf_atomic_init(&peer->ref_cnt); /* set to 0 */
                                ol_txrx_peer_get_ref(peer,
                                                     PEER_DEBUG_ID_OL_HASH_ERS);
                                ol_txrx_peer_release_ref(peer,
                                                         PEER_DEBUG_ID_OL_HASH_ERS);
                        }
                }
        }
}

/*=== function definitions for peer id --> peer object map ==================*/

static int ol_txrx_peer_find_map_attach(struct ol_txrx_pdev_t *pdev)
{
        int max_peers, peer_map_size;

        /* allocate the peer ID -> peer object map */
        max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
        peer_map_size = max_peers * sizeof(pdev->peer_id_to_obj_map[0]);
        pdev->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
        if (!pdev->peer_id_to_obj_map)
                return 1;       /* failure */

        return 0;               /* success */
}

static void ol_txrx_peer_find_map_detach(struct ol_txrx_pdev_t *pdev)
{
        qdf_mem_free(pdev->peer_id_to_obj_map);
}

/**
 * ol_txrx_peer_clear_map_peer() - Remove map entries that refer to a peer.
 * @pdev: pdev handle
 * @peer: peer for removing obj map entries
 *
 * Run through the entire peer_id_to_obj map and nullify all the entries
 * that map to a particular peer. Called before deleting the peer object.
 *
 * Return: None
 */
void ol_txrx_peer_clear_map_peer(ol_txrx_pdev_handle pdev,
                                 struct ol_txrx_peer_t *peer)
{
        int max_peers;
        int i;

        max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;

        qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
        for (i = 0; i < max_peers; i++) {
                if (pdev->peer_id_to_obj_map[i].peer == peer) {
                        /* Found a map entry for this peer, clear it. */
                        pdev->peer_id_to_obj_map[i].peer = NULL;
                }
        }
        qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
}

/*
 * ol_txrx_peer_find_add_id() - Add peer_id entry to peer
 *
 * @pdev: Handle to pdev object
 * @peer_mac_addr: MAC address of peer provided by firmware
 * @peer_id: peer_id provided by firmware
 *
 * Search for the peer object matching the MAC address, add the peer_id to
 * its array of peer_ids, and update the peer_id_to_obj map entry
 * for that peer_id. Increment the corresponding reference counts.
 *
 * Riva/Pronto has one peer ID for each peer.
 * Peregrine/Rome has two peer IDs for each peer.
 * iHelium has up to three peer IDs for each peer.
 *
 * Return: None
 */
static inline void ol_txrx_peer_find_add_id(struct ol_txrx_pdev_t *pdev,
                                            uint8_t *peer_mac_addr,
                                            uint16_t peer_id)
{
        struct ol_txrx_peer_t *peer;
        int status;
        int i;
        uint32_t peer_id_ref_cnt;
        uint32_t peer_ref_cnt;

        /* check if there's already a peer object with this MAC address */
        peer =
                ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac_addr,
                                                    1 /* is aligned */, 0,
                                                    PEER_DEBUG_ID_OL_PEER_MAP);

        if (!peer || peer_id == HTT_INVALID_PEER) {
                /*
                 * Currently peer IDs are assigned for vdevs as well as peers.
                 * If the peer ID is for a vdev, then we will fail to find a
                 * peer with a matching MAC address.
                 */
                ol_txrx_err("peer not found or peer ID %d is invalid",
                            peer_id);
                wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
                                    DEBUG_PEER_MAP_EVENT,
                                    peer_id, peer_mac_addr,
                                    peer, 0, 0);

                return;
        }

        qdf_spin_lock(&pdev->peer_map_unmap_lock);

        /* peer's ref count was already incremented by
         * peer_find_hash_find
         */
        if (!pdev->peer_id_to_obj_map[peer_id].peer) {
                pdev->peer_id_to_obj_map[peer_id].peer = peer;
                qdf_atomic_init
                        (&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
        }
        qdf_atomic_inc
                (&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);

        status = 1;

        /* find a place in peer_id array and insert peer_id */
        for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
                if (peer->peer_ids[i] == HTT_INVALID_PEER) {
                        peer->peer_ids[i] = peer_id;
                        status = 0;
                        break;
                }
        }

        if (qdf_atomic_read(&peer->fw_create_pending) == 1) {
                qdf_atomic_set(&peer->fw_create_pending, 0);
        }

        qdf_spin_unlock(&pdev->peer_map_unmap_lock);

        peer_id_ref_cnt = qdf_atomic_read(&pdev->
                                peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
        peer_ref_cnt = qdf_atomic_read(&peer->ref_cnt);
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
                  "%s: peer %pK ID %d peer_id[%d] peer_id_ref_cnt %d peer->ref_cnt %d",
                  __func__, peer, peer_id, i, peer_id_ref_cnt, peer_ref_cnt);
        wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
                            DEBUG_PEER_MAP_EVENT,
                            peer_id, &peer->mac_addr.raw, peer,
                            peer_id_ref_cnt,
                            peer_ref_cnt);

        if (status) {
                /* TBDXXX: assert for now */
                qdf_assert(0);
        }
}
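
/*
 * Illustrative example (hypothetical peer IDs): on a target that issues two
 * peer IDs per peer, e.g. 5 and 17 for the same station, two PEER_MAP events
 * arrive and ol_txrx_peer_find_add_id() runs twice:
 *
 *	after the 1st map: peer->peer_ids = {5, HTT_INVALID_PEER, ...}
 *			   peer_id_to_obj_map[5].peer = peer
 *			   peer_id_to_obj_map[5].peer_id_ref_cnt = 1
 *			   peer->ref_cnt incremented once (held as the map ref)
 *	after the 2nd map: peer->peer_ids = {5, 17, ...}
 *			   peer_id_to_obj_map[17].peer = peer
 *			   peer_id_to_obj_map[17].peer_id_ref_cnt = 1
 *			   peer->ref_cnt incremented again
 *
 * Each later PEER_UNMAP event for ID 5 or 17 drops one of these references
 * in ol_rx_peer_unmap_handler().
 */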

/*=== allocation / deallocation function definitions ========================*/

int ol_txrx_peer_find_attach(struct ol_txrx_pdev_t *pdev)
{
        if (ol_txrx_peer_find_map_attach(pdev))
                return 1;
        if (ol_txrx_peer_find_hash_attach(pdev)) {
                ol_txrx_peer_find_map_detach(pdev);
                return 1;
        }
        return 0;               /* success */
}

void ol_txrx_peer_find_detach(struct ol_txrx_pdev_t *pdev)
{
        ol_txrx_peer_find_map_detach(pdev);
        ol_txrx_peer_find_hash_detach(pdev);
}

/*=== function definitions for message handling =============================*/

#if defined(CONFIG_HL_SUPPORT)

void
ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
                       uint16_t peer_id,
                       uint8_t vdev_id, uint8_t *peer_mac_addr, int tx_ready)
{
        ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
        if (!tx_ready) {
                struct ol_txrx_peer_t *peer;

                peer = ol_txrx_peer_find_by_id(pdev, peer_id);
                if (!peer) {
                        /* ol_txrx_peer_detach called before peer map arrived */
                        return;
                } else {
                        if (tx_ready) {
                                int i;

                                /* unpause all tx queues now, since the
                                 * target is ready
                                 */
                                for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs);
                                     i++)
                                        ol_txrx_peer_tid_unpause(peer, i);

                        } else {
                                /* walk through paused mgmt queue,
                                 * update tx descriptors
                                 */
                                ol_tx_queue_decs_reinit(peer, peer_id);

                                /* keep non-mgmt tx queues paused until assoc
                                 * is finished; tx queues were paused in
                                 * ol_txrx_peer_attach
                                 */
                                /* unpause tx mgmt queue */
                                ol_txrx_peer_tid_unpause(peer,
                                                         HTT_TX_EXT_TID_MGMT);
                        }
                }
        }
}

void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
        struct ol_txrx_peer_t *peer;

        peer = ol_txrx_peer_find_by_id(pdev, peer_id);
        if (peer) {
                int i;
                /*
                 * Unpause all data tx queues now that the target is ready.
                 * The mgmt tx queue was not paused, so skip it.
                 */
                for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++) {
                        if (i == HTT_TX_EXT_TID_MGMT)
                                continue; /* mgmt tx queue was not paused */

                        ol_txrx_peer_tid_unpause(peer, i);
                }
        }
}
#else

void
ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
                       uint16_t peer_id,
                       uint8_t vdev_id,
                       uint8_t *peer_mac_addr,
                       int tx_ready)
{
        ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
}

void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
}

#endif

/*
 * ol_rx_peer_unmap_handler() - Handle peer unmap event from firmware
 *
 * @pdev: Handle to pdev object
 * @peer_id: peer_id unmapped by firmware
 *
 * Decrement the reference count for the peer_id in peer_id_to_obj_map,
 * decrement the reference count in the corresponding peer object, and clear
 * the entry in the peer's peer_ids array.
 * In case of unmap events for a peer that is already deleted, just decrement
 * del_peer_id_ref_cnt.
 *
 * Return: None
 */
void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
        struct ol_txrx_peer_t *peer;
        int i = 0;
        int32_t ref_cnt;

        if (peer_id == HTT_INVALID_PEER) {
                ol_txrx_err("invalid peer ID %d\n", peer_id);
                wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
                                    DEBUG_PEER_UNMAP_EVENT,
                                    peer_id, NULL, NULL, 0, 0x100);
                return;
        }

        qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);

        if (qdf_atomic_read(
                &pdev->peer_id_to_obj_map[peer_id].del_peer_id_ref_cnt)) {
                /* This peer_id belongs to a peer already deleted */
                qdf_atomic_dec(&pdev->peer_id_to_obj_map[peer_id].
                               del_peer_id_ref_cnt);
                ref_cnt = qdf_atomic_read(&pdev->peer_id_to_obj_map[peer_id].
                                          del_peer_id_ref_cnt);
                qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
                wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
                                    DEBUG_PEER_UNMAP_EVENT,
                                    peer_id, NULL, NULL, ref_cnt, 0x101);
                ol_txrx_dbg("peer already deleted, peer_id %d del_peer_id_ref_cnt %d",
                            peer_id, ref_cnt);
                return;
        }
        peer = pdev->peer_id_to_obj_map[peer_id].peer;

        if (peer == NULL) {
                /*
                 * Currently peer IDs are assigned for vdevs as well as peers.
                 * If the peer ID is for a vdev, then the peer pointer stored
                 * in peer_id_to_obj_map will be NULL.
                 */
                qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
                ol_txrx_info("peer not found for peer_id %d", peer_id);
                wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
                                    DEBUG_PEER_UNMAP_EVENT,
                                    peer_id, NULL, NULL, 0, 0x102);
                return;
        }

        if (qdf_atomic_dec_and_test
            (&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt)) {
                pdev->peer_id_to_obj_map[peer_id].peer = NULL;
                for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
                        if (peer->peer_ids[i] == peer_id) {
                                peer->peer_ids[i] = HTT_INVALID_PEER;
                                break;
                        }
                }
        }

        ref_cnt = qdf_atomic_read
                (&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);

        qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);

        wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
                            DEBUG_PEER_UNMAP_EVENT,
                            peer_id, &peer->mac_addr.raw, peer, ref_cnt,
                            qdf_atomic_read(&peer->ref_cnt));

        /*
         * Remove a reference to the peer.
         * If there are no more references, delete the peer object.
         */
        ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_MAP);

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                  "%s: peer_id %d peer %pK peer_id_ref_cnt %d",
                  __func__, peer_id, peer, ref_cnt);
}

/**
 * ol_txrx_peer_remove_obj_map_entries() - Remove matching pdev peer map entries
 * @pdev: pdev handle
 * @peer: peer for removing obj map entries
 *
 * Saves peer_id_ref_cnt to a different field and removes the link
 * to the peer object. It also decrements the peer reference count by
 * the number of references removed.
 *
 * Return: None
 */
void ol_txrx_peer_remove_obj_map_entries(ol_txrx_pdev_handle pdev,
                                         struct ol_txrx_peer_t *peer)
{
        int i;
        uint16_t peer_id;
        int32_t peer_id_ref_cnt;
        int32_t num_deleted_maps = 0;
        uint16_t save_peer_ids[MAX_NUM_PEER_ID_PER_PEER];
        uint16_t save_peer_id_ref_cnt[MAX_NUM_PEER_ID_PER_PEER] = {0};

        qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
        for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
                peer_id = peer->peer_ids[i];
                save_peer_ids[i] = HTT_INVALID_PEER;
                if (peer_id == HTT_INVALID_PEER ||
                    pdev->peer_id_to_obj_map[peer_id].peer == NULL) {
                        /* unused peer_id, or object is already dereferenced */
                        continue;
                }
                if (pdev->peer_id_to_obj_map[peer_id].peer != peer) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX,
                                  QDF_TRACE_LEVEL_ERROR,
                                  FL("peer pointer mismatch in peer_id_to_obj"));
                        continue;
                }
                peer_id_ref_cnt = qdf_atomic_read(
                                        &pdev->peer_id_to_obj_map[peer_id].
                                        peer_id_ref_cnt);
                save_peer_ids[i] = peer_id;
                save_peer_id_ref_cnt[i] = peer_id_ref_cnt;

                /*
                 * Transfer peer_id_ref_cnt into del_peer_id_ref_cnt so that
                 * ol_txrx_peer_release_ref will decrement del_peer_id_ref_cnt
                 * and any map events will increment peer_id_ref_cnt. Otherwise
                 * accounting will be messed up.
                 *
                 * Add operation will ensure that back to back roaming in the
                 * middle of unmap/map event sequence will be accounted for.
                 */
                qdf_atomic_add(peer_id_ref_cnt,
                               &pdev->peer_id_to_obj_map[peer_id].del_peer_id_ref_cnt);
                qdf_atomic_init(&pdev->peer_id_to_obj_map[peer_id].
                                peer_id_ref_cnt);
                num_deleted_maps += peer_id_ref_cnt;
                pdev->peer_id_to_obj_map[peer_id].peer = NULL;
                peer->peer_ids[i] = HTT_INVALID_PEER;
        }
        qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);

        /* Debug print the information after releasing bh spinlock */
        for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
                if (save_peer_ids[i] == HTT_INVALID_PEER)
                        continue;
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                          FL("peer_id = %d, peer_id_ref_cnt = %d, index = %d"),
                          save_peer_ids[i], save_peer_id_ref_cnt[i], i);
        }

        if (num_deleted_maps > qdf_atomic_read(&peer->ref_cnt)) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          FL("num_deleted_maps %d ref_cnt %d"),
                          num_deleted_maps, qdf_atomic_read(&peer->ref_cnt));
                QDF_BUG(0);
                return;
        }

        while (num_deleted_maps-- > 0)
                ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_MAP);
}

struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev)
{
        struct ol_txrx_peer_t *peer;

        qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
        /*
         * Check that the txrx peer itself is valid, and also
         * that the HTT peer ID has been set up for this peer.
         */
        if (vdev->last_real_peer
            && vdev->last_real_peer->peer_ids[0] != HTT_INVALID_PEER_ID) {
                qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
                ol_txrx_peer_get_ref(vdev->last_real_peer,
                                     PEER_DEBUG_ID_OL_INTERNAL);
                qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
                peer = vdev->last_real_peer;
        } else {
                peer = NULL;
        }
        qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
        return peer;
}


/*=== function definitions for debug ========================================*/

#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
void ol_txrx_peer_find_display(ol_txrx_pdev_handle pdev, int indent)
{
        int i, max_peers;

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                  "%*speer map:\n", indent, " ");
        max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
        for (i = 0; i < max_peers; i++) {
                if (pdev->peer_id_to_obj_map[i].peer) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                                  "%*sid %d -> %pK\n",
                                  indent + 4, " ", i,
                                  pdev->peer_id_to_obj_map[i].peer);
                }
        }
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                  "%*speer hash table:\n", indent, " ");
        for (i = 0; i <= pdev->peer_hash.mask; i++) {
                if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
                        struct ol_txrx_peer_t *peer;

                        TAILQ_FOREACH(peer, &pdev->peer_hash.bins[i],
                                      hash_list_elem) {
                                QDF_TRACE(QDF_MODULE_ID_TXRX,
                                          QDF_TRACE_LEVEL_INFO_LOW,
                                          "%*shash idx %d -> %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
                                          indent + 4, " ", i, peer,
                                          peer->mac_addr.raw[0],
                                          peer->mac_addr.raw[1],
                                          peer->mac_addr.raw[2],
                                          peer->mac_addr.raw[3],
                                          peer->mac_addr.raw[4],
                                          peer->mac_addr.raw[5]);
                        }
                }
        }
}

#endif /* if TXRX_DEBUG_LEVEL */