/*
 * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>		/* uint32_t, etc. */
#include <qdf_mem.h>		/* qdf_mem_malloc, etc. */
#include <qdf_types.h>		/* qdf_device_t, qdf_print */
/* header files for utilities */
#include "queue.h"		/* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>		/* ol_cfg_max_peer_id */

/* header files for our internal definitions */
#include <ol_txrx_api.h>	/* ol_txrx_pdev_t, etc. */
#include <ol_txrx_dbg.h>	/* TXRX_DEBUG_LEVEL */
#include <ol_txrx_internal.h>	/* ol_txrx_pdev_t, etc. */
#include <ol_txrx.h>		/* ol_txrx_peer_release_ref */
#include <ol_txrx_peer_find.h>	/* ol_txrx_peer_find_attach, etc. */
#include <ol_tx_queue.h>
#include "wlan_roam_debug.h"

/*=== misc. / utility function definitions ==================================*/

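/**
 * ol_txrx_log2_ceil() - compute ceil(log2(value))
 * @value: input value, expected to be non-zero
 *
 * Used to round the peer hash table size up to the next power of two.
 *
 * Return: smallest n such that (1 << n) >= value; 0 if value is 0
 */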
static int ol_txrx_log2_ceil(unsigned int value)
{
	/* need to switch to unsigned math so that negative values
	 * will right-shift towards 0 instead of -1
	 */
	unsigned int tmp = value;
	int log2 = -1;

	if (value == 0) {
		TXRX_ASSERT2(0);
		return 0;
	}

	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	if (1U << log2 != value)
		log2++;

	return log2;
}

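/**
 * ol_txrx_peer_get_ref() - take a reference on a peer object
 * @peer: peer whose reference count should be incremented
 * @dbg_id: debug ID identifying the caller, used for reference tracking
 *
 * Increments the peer's overall reference count as well as the per-caller
 * access count for @dbg_id.
 *
 * Return: the per-dbg_id access count after the increment, or -EINVAL if
 *	   @peer is NULL or @dbg_id is out of range
 */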
int ol_txrx_peer_get_ref(struct ol_txrx_peer_t *peer,
			 enum peer_debug_id_type dbg_id)
{
	int refs_dbg_id;

	if (!peer) {
		ol_txrx_err("peer is null for ID %d", dbg_id);
		return -EINVAL;
	}

	if (dbg_id >= PEER_DEBUG_ID_MAX || dbg_id < 0) {
		ol_txrx_err("incorrect debug_id %d ", dbg_id);
		return -EINVAL;
	}

	qdf_atomic_inc(&peer->ref_cnt);
	qdf_atomic_inc(&peer->access_list[dbg_id]);
	refs_dbg_id = qdf_atomic_read(&peer->access_list[dbg_id]);

	return refs_dbg_id;
}

/*=== function definitions for peer MAC addr --> peer object hash table =====*/

/*
 * TXRX_PEER_HASH_LOAD_FACTOR:
 * Multiply by 2 and divide by 2^0 (shift by 0), then round up to a
 * power of two.
 * This provides at least twice as many bins in the peer hash table
 * as there will be entries.
 * Having substantially more bins than spaces minimizes the probability of
 * having to compare MAC addresses.
 * Because the MAC address comparison is fairly efficient, it is okay if the
 * hash table is sparsely loaded, but it's generally better to use extra mem
 * to keep the table sparse, to keep the lookups as fast as possible.
 * An optimization would be to apply a more conservative loading factor for
 * high latency, where the lookup happens during the tx classification of
 * every tx frame, than for low-latency, where the lookup only happens
 * during association, when the PEER_MAP message is received.
 */
#define TXRX_PEER_HASH_LOAD_MULT 2
#define TXRX_PEER_HASH_LOAD_SHIFT 0

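/**
 * ol_txrx_peer_find_hash_attach() - allocate the peer MAC addr -> peer
 *	object hash table
 * @pdev: pdev handle whose hash table should be allocated
 *
 * The number of bins is the configured max peer ID count scaled by the
 * load factor above and rounded up to a power of two, so the bin index
 * can be computed with a simple mask.
 *
 * Return: 0 on success, 1 on allocation failure
 */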
static int ol_txrx_peer_find_hash_attach(struct ol_txrx_pdev_t *pdev)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
	hash_elems *= TXRX_PEER_HASH_LOAD_MULT;
	hash_elems >>= TXRX_PEER_HASH_LOAD_SHIFT;
	log2 = ol_txrx_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	pdev->peer_hash.mask = hash_elems - 1;
	pdev->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	pdev->peer_hash.bins =
		qdf_mem_malloc(hash_elems *
			       sizeof(TAILQ_HEAD(anonymous_tail_q,
						 ol_txrx_peer_t)));
	if (!pdev->peer_hash.bins)
		return 1;	/* failure */

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&pdev->peer_hash.bins[i]);

	return 0;		/* success */
}

static void ol_txrx_peer_find_hash_detach(struct ol_txrx_pdev_t *pdev)
{
	qdf_mem_free(pdev->peer_hash.bins);
}

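/**
 * ol_txrx_peer_find_hash_index() - compute the hash bin for a MAC address
 * @pdev: pdev handle that owns the hash table
 * @mac_addr: aligned MAC address to hash
 *
 * XORs the three 16-bit halves of the MAC address, folds in the upper
 * index bits, and masks the result down to the number of bins.
 *
 * Return: hash table bin index for @mac_addr
 */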
static inline unsigned int
ol_txrx_peer_find_hash_index(struct ol_txrx_pdev_t *pdev,
			     union ol_txrx_align_mac_addr_t *mac_addr)
{
	unsigned int index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^ mac_addr->align2.bytes_ef;
	index ^= index >> pdev->peer_hash.idx_bits;
	index &= pdev->peer_hash.mask;
	return index;
}

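/**
 * ol_txrx_peer_find_hash_add() - add a peer object to the MAC address hash
 * @pdev: pdev handle that owns the hash table
 * @peer: peer object to insert
 *
 * The peer is appended to the tail of its hash bin while holding
 * peer_ref_mutex.
 */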
void
ol_txrx_peer_find_hash_add(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	unsigned int index;

	index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	/*
	 * It is important to add the new peer at the tail of the peer list
	 * with the bin index. Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&pdev->peer_hash.bins[index], peer, hash_list_elem);
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
}

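/**
 * ol_txrx_peer_vdev_find_hash() - find a peer by MAC address within a vdev
 * @pdev: pdev handle that owns the hash table
 * @vdev: vdev the peer must belong to
 * @peer_mac_addr: MAC address to look up
 * @mac_addr_is_aligned: set if @peer_mac_addr is already union-aligned
 * @check_valid: if set, only return peers marked valid
 *
 * On success a reference is taken on the returned peer
 * (PEER_DEBUG_ID_OL_INTERNAL); the caller must release it.
 *
 * Return: matching peer object, or NULL if none is found
 */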
struct ol_txrx_peer_t *ol_txrx_peer_vdev_find_hash(struct ol_txrx_pdev_t *pdev,
						   struct ol_txrx_vdev_t *vdev,
						   uint8_t *peer_mac_addr,
						   int mac_addr_is_aligned,
						   uint8_t check_valid)
{
	union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct ol_txrx_peer_t *peer;

	if (mac_addr_is_aligned) {
		mac_addr = (union ol_txrx_align_mac_addr_t *)peer_mac_addr;
	} else {
		qdf_mem_copy(&local_mac_addr_aligned.raw[0],
			     peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
		if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
		    0 && (check_valid == 0 || peer->valid)
		    && peer->vdev == vdev) {
			/* found it */
			ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
	return NULL;		/* failure */
}

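/**
 * ol_txrx_peer_find_hash_find_get_ref() - find a peer by MAC address and
 *	take a tracked reference on it
 * @pdev: pdev handle that owns the hash table
 * @peer_mac_addr: MAC address to look up
 * @mac_addr_is_aligned: set if @peer_mac_addr is already union-aligned
 * @check_valid: if set, only return peers marked valid
 * @dbg_id: debug ID under which the reference is taken
 *
 * Like ol_txrx_peer_vdev_find_hash(), but searches across all vdevs and
 * lets the caller choose the debug ID for the reference.
 *
 * Return: matching peer object, or NULL if none is found
 */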
struct ol_txrx_peer_t *
	ol_txrx_peer_find_hash_find_get_ref
				(struct ol_txrx_pdev_t *pdev,
				 uint8_t *peer_mac_addr,
				 int mac_addr_is_aligned,
				 u8 check_valid,
				 enum peer_debug_id_type dbg_id)
{
	union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct ol_txrx_peer_t *peer;

	if (mac_addr_is_aligned) {
		mac_addr = (union ol_txrx_align_mac_addr_t *)peer_mac_addr;
	} else {
		qdf_mem_copy(&local_mac_addr_aligned.raw[0],
			     peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
		if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
		    0 && (check_valid == 0 || peer->valid)) {
			/* found it */
			ol_txrx_peer_get_ref(peer, dbg_id);
			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
	return NULL;		/* failure */
}

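/**
 * ol_txrx_peer_find_hash_remove() - unlink a peer from the MAC address hash
 * @pdev: pdev handle that owns the hash table
 * @peer: peer object to remove
 *
 * The caller must already hold peer_ref_mutex; see the comment in the
 * function body for why the lock cannot be taken here.
 */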
void
ol_txrx_peer_find_hash_remove(struct ol_txrx_pdev_t *pdev,
			      struct ol_txrx_peer_t *peer)
{
	unsigned int index;

	index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
	/*
	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
	 * by the caller.
	 * The caller needs to hold the lock from the time the peer object's
	 * reference count is decremented and tested up through the time the
	 * reference to the peer object is removed from the hash table, by
	 * this function.
	 * Holding the lock only while removing the peer object reference
	 * from the hash table keeps the hash table consistent, but does not
	 * protect against a new HL tx context starting to use the peer object
	 * if it looks up the peer object from its MAC address just after the
	 * peer ref count is decremented to zero, but just before the peer
	 * object reference is removed from the hash table.
	 */
	/* qdf_spin_lock_bh(&pdev->peer_ref_mutex); */
	TAILQ_REMOVE(&pdev->peer_hash.bins[index], peer, hash_list_elem);
	/* qdf_spin_unlock_bh(&pdev->peer_ref_mutex); */
}

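/**
 * ol_txrx_peer_find_hash_erase() - release all peers still in the hash table
 * @pdev: pdev handle being torn down
 *
 * Walks every hash bin and drops each remaining peer by resetting its
 * reference count and then taking and releasing one final reference, so
 * ol_txrx_peer_release_ref() deletes the object.
 */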
void ol_txrx_peer_find_hash_erase(struct ol_txrx_pdev_t *pdev)
{
	unsigned int i;
	/*
	 * Not really necessary to take peer_ref_mutex lock - by this point,
	 * it's known that the pdev is no longer in use.
	 */

	for (i = 0; i <= pdev->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
			struct ol_txrx_peer_t *peer, *peer_next;

			/*
			 * TAILQ_FOREACH_SAFE must be used here to avoid any
			 * memory access violation after peer is freed
			 */
			TAILQ_FOREACH_SAFE(peer, &pdev->peer_hash.bins[i],
					   hash_list_elem, peer_next) {
				/*
				 * Don't remove the peer from the hash table -
				 * that would modify the list we are currently
				 * traversing, and it's not necessary anyway.
				 */
				/*
				 * Artificially adjust the peer's ref count to
				 * 1, so it will get deleted by
				 * ol_txrx_peer_release_ref.
				 */
				qdf_atomic_init(&peer->ref_cnt); /* set to 0 */
				ol_txrx_peer_get_ref(peer,
						     PEER_DEBUG_ID_OL_HASH_ERS);
				ol_txrx_peer_release_ref(peer,
						PEER_DEBUG_ID_OL_HASH_ERS);
			}
		}
	}
}

/*=== function definitions for peer id --> peer object map ==================*/

static int ol_txrx_peer_find_map_attach(struct ol_txrx_pdev_t *pdev)
{
	int max_peers, peer_map_size;

	/* allocate the peer ID -> peer object map */
	max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
	peer_map_size = max_peers * sizeof(pdev->peer_id_to_obj_map[0]);
	pdev->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!pdev->peer_id_to_obj_map)
		return 1;	/* failure */

	return 0;		/* success */
}

static void ol_txrx_peer_find_map_detach(struct ol_txrx_pdev_t *pdev)
{
	qdf_mem_free(pdev->peer_id_to_obj_map);
}

/**
 * ol_txrx_peer_clear_map_peer() - Remove map entries that refer to a peer.
 * @pdev: pdev handle
 * @peer: peer for removing obj map entries
 *
 * Run through the entire peer_id_to_obj map and nullify all the entries
 * that map to a particular peer. Called before deleting the peer object.
 *
 * Return: None
 */
void ol_txrx_peer_clear_map_peer(ol_txrx_pdev_handle pdev,
				 struct ol_txrx_peer_t *peer)
{
	int max_peers;
	int i;

	max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;

	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
	for (i = 0; i < max_peers; i++) {
		if (pdev->peer_id_to_obj_map[i].peer == peer) {
			/* Found a map entry for this peer, clear it. */
			pdev->peer_id_to_obj_map[i].peer = NULL;
		}
	}
	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
}

/*
 * ol_txrx_peer_find_add_id() - Add peer_id entry to peer
 *
 * @pdev: Handle to pdev object
 * @peer_mac_addr: MAC address of peer provided by firmware
 * @peer_id: peer_id provided by firmware
 *
 * Search for the peer object matching the MAC address, add the peer_id to
 * its array of peer_ids and update the peer_id_to_obj map entry for that
 * peer_id. Increment the corresponding reference counts.
 *
 * Riva/Pronto has one peer ID for each peer.
 * Peregrine/Rome has two peer IDs for each peer.
 * iHelium has up to three peer IDs for each peer.
 *
 * Return: None
 */
static inline void ol_txrx_peer_find_add_id(struct ol_txrx_pdev_t *pdev,
					    uint8_t *peer_mac_addr,
					    uint16_t peer_id)
{
	struct ol_txrx_peer_t *peer;
	int status;
	int i;
	uint32_t peer_id_ref_cnt;
	uint32_t peer_ref_cnt;
	u8 check_valid = 0;

	if (pdev->enable_peer_unmap_conf_support)
		check_valid = 1;

	/* check if there's already a peer object with this MAC address */
	peer =
		ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac_addr,
						    1 /* is aligned */,
						    check_valid,
						    PEER_DEBUG_ID_OL_PEER_MAP);

	if (!peer || peer_id == HTT_INVALID_PEER) {
		/*
		 * Currently peer IDs are assigned for vdevs as well as peers.
		 * If the peer ID is for a vdev, then we will fail to find a
		 * peer with a matching MAC address.
		 */
		ol_txrx_err("peer not found or peer ID is %d invalid",
			    peer_id);
		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
				    DEBUG_PEER_MAP_EVENT,
				    peer_id, peer_mac_addr,
				    peer, 0, 0);

		return;
	}

	qdf_spin_lock(&pdev->peer_map_unmap_lock);

	/* peer's ref count was already incremented by
	 * peer_find_hash_find
	 */
	if (!pdev->peer_id_to_obj_map[peer_id].peer) {
		pdev->peer_id_to_obj_map[peer_id].peer = peer;
		qdf_atomic_init
			(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
	}
	qdf_atomic_inc
		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);

	status = 1;

	/* find a place in peer_id array and insert peer_id */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
			peer->peer_ids[i] = peer_id;
			status = 0;
			break;
		}
	}

	if (qdf_atomic_read(&peer->fw_create_pending) == 1)
		qdf_atomic_set(&peer->fw_create_pending, 0);

	qdf_spin_unlock(&pdev->peer_map_unmap_lock);

	peer_id_ref_cnt = qdf_atomic_read(&pdev->
				peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
	peer_ref_cnt = qdf_atomic_read(&peer->ref_cnt);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: peer %pK ID %d peer_id[%d] peer_id_ref_cnt %d peer->ref_cnt %d",
		  __func__, peer, peer_id, i, peer_id_ref_cnt, peer_ref_cnt);
	wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
			    DEBUG_PEER_MAP_EVENT,
			    peer_id, &peer->mac_addr.raw, peer,
			    peer_id_ref_cnt,
			    peer_ref_cnt);

	if (status) {
		/* TBDXXX: assert for now */
		qdf_assert(0);
	}
}

/*=== allocation / deallocation function definitions ========================*/

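/**
 * ol_txrx_peer_find_attach() - allocate the peer lookup structures
 * @pdev: pdev handle being initialized
 *
 * Allocates both the peer ID -> peer object map and the MAC address hash
 * table; if the hash table allocation fails, the map is freed again.
 *
 * Return: 0 on success, 1 on failure
 */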
int ol_txrx_peer_find_attach(struct ol_txrx_pdev_t *pdev)
{
	if (ol_txrx_peer_find_map_attach(pdev))
		return 1;
	if (ol_txrx_peer_find_hash_attach(pdev)) {
		ol_txrx_peer_find_map_detach(pdev);
		return 1;
	}
	return 0;		/* success */
}

void ol_txrx_peer_find_detach(struct ol_txrx_pdev_t *pdev)
{
	ol_txrx_peer_find_map_detach(pdev);
	ol_txrx_peer_find_hash_detach(pdev);
}

/**
 * ol_txrx_peer_unmap_conf_handler() - send peer unmap conf cmd to FW
 * @pdev: pdev_handle
 * @peer_id: peer_id
 *
 * Return: None
 */
static inline void
ol_txrx_peer_unmap_conf_handler(ol_txrx_pdev_handle pdev,
				uint16_t peer_id)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	if (peer_id == HTT_INVALID_PEER) {
		ol_txrx_err("invalid peer ID %d\n", peer_id);
		return;
	}

	qdf_atomic_inc(&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt);

	if (qdf_atomic_read(
		&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt) ==
	    pdev->peer_id_unmap_ref_cnt) {
		ol_txrx_dbg("send unmap conf cmd: peer_id[%d] unmap_cnt[%d]",
			    peer_id, pdev->peer_id_unmap_ref_cnt);
		status = pdev->peer_unmap_sync_cb(
			DEBUG_INVALID_VDEV_ID,
			1, &peer_id);

		if (status == QDF_STATUS_SUCCESS ||
		    status == QDF_STATUS_E_BUSY) {
			qdf_atomic_init(
			&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt);
		} else {
			qdf_atomic_set(
			&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt,
			OL_TXRX_INVALID_PEER_UNMAP_COUNT);
			ol_txrx_err("unable to send unmap conf cmd [%d]",
				    peer_id);
		}
	}
}

/*=== function definitions for message handling =============================*/

#if defined(CONFIG_HL_SUPPORT)

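/**
 * ol_rx_peer_map_handler() - handle a peer map event from the target
 * @pdev: pdev handle
 * @peer_id: peer ID assigned by the target
 * @vdev_id: vdev ID the peer belongs to
 * @peer_mac_addr: MAC address of the peer
 * @tx_ready: whether the target can already accept tx for this peer
 *
 * Records the peer_id for the matching peer object. In the HL case, if the
 * target is not yet tx-ready, the paused tx mgmt queue descriptors are
 * reinitialized and the mgmt tid is unpaused, while the data tids stay
 * paused until ol_txrx_peer_tx_ready_handler() runs.
 */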
void
ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
		       uint16_t peer_id,
		       uint8_t vdev_id, uint8_t *peer_mac_addr, int tx_ready)
{
	ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
	if (!tx_ready) {
		struct ol_txrx_peer_t *peer;

		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
		if (!peer) {
			/* ol_txrx_peer_detach called before peer map arrived */
			return;
		} else {
			if (tx_ready) {
				int i;

				/* unpause all tx queues now, since the
				 * target is ready
				 */
				for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs);
				     i++)
					ol_txrx_peer_tid_unpause(peer, i);

			} else {
				/* walk through paused mgmt queue,
				 * update tx descriptors
				 */
				ol_tx_queue_decs_reinit(peer, peer_id);

				/* keep non-mgmt tx queues paused until assoc
				 * is finished; tx queues were paused in
				 * ol_txrx_peer_attach
				 */
				/* unpause tx mgmt queue */
				ol_txrx_peer_tid_unpause(peer,
							 HTT_TX_EXT_TID_MGMT);
			}
		}
	}
}

void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer) {
		int i;
		/*
		 * Unpause all data tx queues now that the target is ready.
		 * The mgmt tx queue was not paused, so skip it.
		 */
		for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++) {
			if (i == HTT_TX_EXT_TID_MGMT)
				continue; /* mgmt tx queue was not paused */

			ol_txrx_peer_tid_unpause(peer, i);
		}
	}
}
#else

void
ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
		       uint16_t peer_id,
		       uint8_t vdev_id,
		       uint8_t *peer_mac_addr,
		       int tx_ready)
{
	ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
}

void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
}

#endif

/*
 * ol_rx_peer_unmap_handler() - Handle peer unmap event from firmware
 *
 * @pdev: Handle to pdev object
 * @peer_id: peer_id unmapped by firmware
 *
 * Decrement reference count for the peer_id in peer_id_to_obj_map,
 * decrement reference count in corresponding peer object and clear the entry
 * in peer's peer_ids array.
 * In case of unmap events for a peer that is already deleted, just decrement
 * del_peer_id_ref_cnt.
 *
 * Return: None
 */
void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
	struct ol_txrx_peer_t *peer;
	int i = 0;
	int32_t ref_cnt;

	if (peer_id == HTT_INVALID_PEER) {
		ol_txrx_err("invalid peer ID %d\n", peer_id);
		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
				    DEBUG_PEER_UNMAP_EVENT,
				    peer_id, NULL, NULL, 0, 0x100);
		return;
	}

	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);

	/* send peer unmap conf cmd to fw for unmapped peer_ids */
	if (pdev->enable_peer_unmap_conf_support &&
	    pdev->peer_unmap_sync_cb)
		ol_txrx_peer_unmap_conf_handler(pdev, peer_id);

	if (qdf_atomic_read(
		&pdev->peer_id_to_obj_map[peer_id].del_peer_id_ref_cnt)) {
		/* This peer_id belongs to a peer already deleted */
		qdf_atomic_dec(&pdev->peer_id_to_obj_map[peer_id].
			       del_peer_id_ref_cnt);
		ref_cnt = qdf_atomic_read(&pdev->peer_id_to_obj_map[peer_id].
					  del_peer_id_ref_cnt);
		qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
				    DEBUG_PEER_UNMAP_EVENT,
				    peer_id, NULL, NULL, ref_cnt, 0x101);
		ol_txrx_dbg("peer already deleted, peer_id %d del_peer_id_ref_cnt %d",
			    peer_id, ref_cnt);
		return;
	}
	peer = pdev->peer_id_to_obj_map[peer_id].peer;

	if (!peer) {
		/*
		 * Currently peer IDs are assigned for vdevs as well as peers.
		 * If the peer ID is for a vdev, then the peer pointer stored
		 * in peer_id_to_obj_map will be NULL.
		 */
		qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
		ol_txrx_info("peer not found for peer_id %d", peer_id);
		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
				    DEBUG_PEER_UNMAP_EVENT,
				    peer_id, NULL, NULL, 0, 0x102);
		return;
	}

	if (qdf_atomic_dec_and_test
		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt)) {
		pdev->peer_id_to_obj_map[peer_id].peer = NULL;
		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
			if (peer->peer_ids[i] == peer_id) {
				peer->peer_ids[i] = HTT_INVALID_PEER;
				break;
			}
		}
	}

	ref_cnt = qdf_atomic_read
		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);

	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);

	wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
			    DEBUG_PEER_UNMAP_EVENT,
			    peer_id, &peer->mac_addr.raw, peer, ref_cnt,
			    qdf_atomic_read(&peer->ref_cnt));

	/*
	 * Remove a reference to the peer.
	 * If there are no more references, delete the peer object.
	 */
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_MAP);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: peer_id %d peer %pK peer_id_ref_cnt %d",
		  __func__, peer_id, peer, ref_cnt);
}

/**
 * ol_txrx_peer_remove_obj_map_entries() - Remove matching pdev peer map entries
 * @pdev: pdev handle
 * @peer: peer for removing obj map entries
 *
 * Saves peer_id_ref_cnt to a different field and removes the link
 * to peer object. It also decrements the peer reference count by
 * the number of references removed.
 *
 * Return: None
 */
void ol_txrx_peer_remove_obj_map_entries(ol_txrx_pdev_handle pdev,
					 struct ol_txrx_peer_t *peer)
{
	int i;
	uint16_t peer_id;
	int32_t peer_id_ref_cnt;
	int32_t num_deleted_maps = 0;
	uint16_t save_peer_ids[MAX_NUM_PEER_ID_PER_PEER];
	uint16_t save_peer_id_ref_cnt[MAX_NUM_PEER_ID_PER_PEER] = {0};

	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		peer_id = peer->peer_ids[i];
		save_peer_ids[i] = HTT_INVALID_PEER;
		if (peer_id == HTT_INVALID_PEER ||
		    !pdev->peer_id_to_obj_map[peer_id].peer) {
			/* unused peer_id, or object is already dereferenced */
			continue;
		}
		if (pdev->peer_id_to_obj_map[peer_id].peer != peer) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("peer pointer mismatch in peer_id_to_obj"));
			continue;
		}
		peer_id_ref_cnt = qdf_atomic_read(
					&pdev->peer_id_to_obj_map[peer_id].
					peer_id_ref_cnt);
		save_peer_ids[i] = peer_id;
		save_peer_id_ref_cnt[i] = peer_id_ref_cnt;

		/*
		 * Transfer peer_id_ref_cnt into del_peer_id_ref_cnt so that
		 * ol_txrx_peer_release_ref will decrement del_peer_id_ref_cnt
		 * and any map events will increment peer_id_ref_cnt. Otherwise
		 * accounting will be messed up.
		 *
		 * Add operation will ensure that back to back roaming in the
		 * middle of unmap/map event sequence will be accounted for.
		 */
		qdf_atomic_add(peer_id_ref_cnt,
			       &pdev->peer_id_to_obj_map[peer_id].
			       del_peer_id_ref_cnt);
		qdf_atomic_init(&pdev->peer_id_to_obj_map[peer_id].
				peer_id_ref_cnt);
		num_deleted_maps += peer_id_ref_cnt;
		pdev->peer_id_to_obj_map[peer_id].peer = NULL;
		peer->peer_ids[i] = HTT_INVALID_PEER;
	}
	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);

	/* Debug print the information after releasing bh spinlock */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (save_peer_ids[i] == HTT_INVALID_PEER)
			continue;
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
			  FL("peer_id = %d, peer_id_ref_cnt = %d, index = %d"),
			  save_peer_ids[i], save_peer_id_ref_cnt[i], i);
	}

	if (num_deleted_maps > qdf_atomic_read(&peer->ref_cnt)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("num_deleted_maps %d ref_cnt %d"),
			  num_deleted_maps, qdf_atomic_read(&peer->ref_cnt));
		QDF_BUG(0);
		return;
	}

	while (num_deleted_maps-- > 0)
		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_MAP);
}

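/**
 * ol_txrx_assoc_peer_find() - return the vdev's last real peer, if usable
 * @vdev: vdev whose association peer is wanted
 *
 * Checks that last_real_peer exists and already has an HTT peer ID, and if
 * so takes a reference (PEER_DEBUG_ID_OL_INTERNAL) that the caller must
 * release.
 *
 * Return: referenced last_real_peer, or NULL
 */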
struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev)
{
	struct ol_txrx_peer_t *peer;

	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
	/*
	 * Check that the txrx peer is itself valid, and also that the
	 * HTT peer ID has been set up for this peer.
	 */
	if (vdev->last_real_peer
	    && vdev->last_real_peer->peer_ids[0] != HTT_INVALID_PEER_ID) {
		qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
		ol_txrx_peer_get_ref(vdev->last_real_peer,
				     PEER_DEBUG_ID_OL_INTERNAL);
		qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
		peer = vdev->last_real_peer;
	} else {
		peer = NULL;
	}
	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
	return peer;
}

/*=== function definitions for debug ========================================*/

#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
void ol_txrx_peer_find_display(ol_txrx_pdev_handle pdev, int indent)
{
	int i, max_peers;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "%*speer map:\n", indent, " ");
	max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
	for (i = 0; i < max_peers; i++) {
		if (pdev->peer_id_to_obj_map[i].peer) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "%*sid %d -> %pK\n",
				  indent + 4, " ", i,
				  pdev->peer_id_to_obj_map[i].peer);
		}
	}
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "%*speer hash table:\n", indent, " ");
	for (i = 0; i <= pdev->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
			struct ol_txrx_peer_t *peer;

			TAILQ_FOREACH(peer, &pdev->peer_hash.bins[i],
				      hash_list_elem) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO_LOW,
					  "%*shash idx %d -> %pK ("QDF_MAC_ADDR_STR")\n",
					  indent + 4, " ", i, peer,
					  QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw));
			}
		}
	}
}

#endif /* if TXRX_DEBUG_LEVEL */