blob: 4b8abb1fa5c7c474cdd60a2b34235f5021d508c8 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Dustin Brown763f3962018-01-04 14:05:42 -08002 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080019/*=== includes ===*/
20/* header files for OS primitives */
21#include <osdep.h> /* uint32_t, etc. */
Anurag Chouhan600c3a02016-03-01 10:33:54 +053022#include <qdf_mem.h> /* qdf_mem_malloc, etc. */
Anurag Chouhan6d760662016-02-20 16:05:43 +053023#include <qdf_types.h> /* qdf_device_t, qdf_print */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080024/* header files for utilities */
25#include <cds_queue.h> /* TAILQ */
26
27/* header files for configuration API */
28#include <ol_cfg.h> /* ol_cfg_max_peer_id */
29
30/* header files for our internal definitions */
31#include <ol_txrx_api.h> /* ol_txrx_pdev_t, etc. */
32#include <ol_txrx_dbg.h> /* TXRX_DEBUG_LEVEL */
33#include <ol_txrx_internal.h> /* ol_txrx_pdev_t, etc. */
Mohit Khannab7bec722017-11-10 11:43:44 -080034#include <ol_txrx.h> /* ol_txrx_peer_release_ref */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080035#include <ol_txrx_peer_find.h> /* ol_txrx_peer_find_attach, etc. */
36#include <ol_tx_queue.h>
Deepak Dhamdheref918d422017-07-06 12:56:29 -070037#include "wlan_roam_debug.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080038
39/*=== misc. / utility function definitions ==================================*/
40
/**
 * ol_txrx_log2_ceil() - compute ceil(log2(value))
 * @value: input value; must be non-zero (asserted via TXRX_ASSERT2)
 *
 * Return: smallest n such that (1 << n) >= value; 0 if value is 0
 */
static int ol_txrx_log2_ceil(unsigned int value)
{
	/* unsigned math so right-shifts converge to 0 instead of -1 */
	unsigned int remaining = value;
	int bits = -1;

	if (value == 0) {
		TXRX_ASSERT2(0);
		return 0;
	}

	/* position of the highest set bit */
	while (remaining) {
		bits++;
		remaining >>= 1;
	}

	/* round up when value is not an exact power of two */
	if (value != (1U << bits))
		bits++;

	return bits;
}
63
Mohit Khannab7bec722017-11-10 11:43:44 -080064int ol_txrx_peer_get_ref(struct ol_txrx_peer_t *peer,
65 enum peer_debug_id_type dbg_id)
Mohit Khannab04dfcd2017-02-13 18:54:35 -080066{
Mohit Khannab7bec722017-11-10 11:43:44 -080067 int refs_dbg_id;
68
69 if (!peer) {
70 ol_txrx_err("peer is null for ID %d", dbg_id);
71 return -EINVAL;
72 }
73
74 if (dbg_id >= PEER_DEBUG_ID_MAX || dbg_id < 0) {
75 ol_txrx_err("incorrect debug_id %d ", dbg_id);
76 return -EINVAL;
77 }
78
79 qdf_atomic_inc(&peer->ref_cnt);
80 qdf_atomic_inc(&peer->access_list[dbg_id]);
81 refs_dbg_id = qdf_atomic_read(&peer->access_list[dbg_id]);
Jingxiang Ge3badb982018-01-02 17:39:01 +080082
Mohit Khannab7bec722017-11-10 11:43:44 -080083 return refs_dbg_id;
Mohit Khannab04dfcd2017-02-13 18:54:35 -080084}
85
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080086/*=== function definitions for peer MAC addr --> peer object hash table =====*/
87
88/*
89 * TXRX_PEER_HASH_LOAD_FACTOR:
90 * Multiply by 2 and divide by 2^0 (shift by 0), then round up to a
91 * power of two.
92 * This provides at least twice as many bins in the peer hash table
93 * as there will be entries.
94 * Having substantially more bins than spaces minimizes the probability of
95 * having to compare MAC addresses.
96 * Because the MAC address comparison is fairly efficient, it is okay if the
97 * hash table is sparsely loaded, but it's generally better to use extra mem
98 * to keep the table sparse, to keep the lookups as fast as possible.
99 * An optimization would be to apply a more conservative loading factor for
100 * high latency, where the lookup happens during the tx classification of
101 * every tx frame, than for low-latency, where the lookup only happens
102 * during association, when the PEER_MAP message is received.
103 */
104#define TXRX_PEER_HASH_LOAD_MULT 2
105#define TXRX_PEER_HASH_LOAD_SHIFT 0
106
107static int ol_txrx_peer_find_hash_attach(struct ol_txrx_pdev_t *pdev)
108{
109 int i, hash_elems, log2;
110
111 /* allocate the peer MAC address -> peer object hash table */
112 hash_elems = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
113 hash_elems *= TXRX_PEER_HASH_LOAD_MULT;
114 hash_elems >>= TXRX_PEER_HASH_LOAD_SHIFT;
115 log2 = ol_txrx_log2_ceil(hash_elems);
116 hash_elems = 1 << log2;
117
118 pdev->peer_hash.mask = hash_elems - 1;
119 pdev->peer_hash.idx_bits = log2;
120 /* allocate an array of TAILQ peer object lists */
121 pdev->peer_hash.bins =
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530122 qdf_mem_malloc(hash_elems *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800123 sizeof(TAILQ_HEAD(anonymous_tail_q,
124 ol_txrx_peer_t)));
125 if (!pdev->peer_hash.bins)
126 return 1; /* failure */
127
128 for (i = 0; i < hash_elems; i++)
129 TAILQ_INIT(&pdev->peer_hash.bins[i]);
130
131 return 0; /* success */
132}
133
/* Free the peer MAC-address hash table allocated by hash_attach. */
static void ol_txrx_peer_find_hash_detach(struct ol_txrx_pdev_t *pdev)
{
	qdf_mem_free(pdev->peer_hash.bins);
}
138
Krunal Soni9e54d982018-08-20 17:29:51 -0700139static inline unsigned int
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800140ol_txrx_peer_find_hash_index(struct ol_txrx_pdev_t *pdev,
141 union ol_txrx_align_mac_addr_t *mac_addr)
142{
Yun Park512f3a12017-04-08 10:13:04 -0700143 unsigned int index;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800144
145 index =
146 mac_addr->align2.bytes_ab ^
147 mac_addr->align2.bytes_cd ^ mac_addr->align2.bytes_ef;
148 index ^= index >> pdev->peer_hash.idx_bits;
149 index &= pdev->peer_hash.mask;
150 return index;
151}
152
153void
154ol_txrx_peer_find_hash_add(struct ol_txrx_pdev_t *pdev,
155 struct ol_txrx_peer_t *peer)
156{
Yun Park512f3a12017-04-08 10:13:04 -0700157 unsigned int index;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800158
159 index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530160 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800161 /*
162 * It is important to add the new peer at the tail of the peer list
163 * with the bin index. Together with having the hash_find function
164 * search from head to tail, this ensures that if two entries with
165 * the same MAC address are stored, the one added first will be
166 * found first.
167 */
168 TAILQ_INSERT_TAIL(&pdev->peer_hash.bins[index], peer, hash_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530169 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800170}
171
172struct ol_txrx_peer_t *ol_txrx_peer_vdev_find_hash(struct ol_txrx_pdev_t *pdev,
173 struct ol_txrx_vdev_t *vdev,
174 uint8_t *peer_mac_addr,
175 int mac_addr_is_aligned,
176 uint8_t check_valid)
177{
178 union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
Yun Park512f3a12017-04-08 10:13:04 -0700179 unsigned int index;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800180 struct ol_txrx_peer_t *peer;
181
182 if (mac_addr_is_aligned) {
183 mac_addr = (union ol_txrx_align_mac_addr_t *)peer_mac_addr;
184 } else {
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530185 qdf_mem_copy(&local_mac_addr_aligned.raw[0],
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800186 peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
187 mac_addr = &local_mac_addr_aligned;
188 }
189 index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530190 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800191 TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
192 if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
193 0 && (check_valid == 0 || peer->valid)
194 && peer->vdev == vdev) {
Mohit Khannab04dfcd2017-02-13 18:54:35 -0800195 /* found it */
Mohit Khannab7bec722017-11-10 11:43:44 -0800196 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530197 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800198 return peer;
199 }
200 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530201 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800202 return NULL; /* failure */
203}
204
Mohit Khannababadb82017-02-21 18:54:19 -0800205struct ol_txrx_peer_t *
Mohit Khannab7bec722017-11-10 11:43:44 -0800206 ol_txrx_peer_find_hash_find_get_ref
Mohit Khannababadb82017-02-21 18:54:19 -0800207 (struct ol_txrx_pdev_t *pdev,
208 uint8_t *peer_mac_addr,
209 int mac_addr_is_aligned,
Mohit Khannab7bec722017-11-10 11:43:44 -0800210 u8 check_valid,
211 enum peer_debug_id_type dbg_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800212{
213 union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
Yun Park512f3a12017-04-08 10:13:04 -0700214 unsigned int index;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800215 struct ol_txrx_peer_t *peer;
216
217 if (mac_addr_is_aligned) {
218 mac_addr = (union ol_txrx_align_mac_addr_t *)peer_mac_addr;
219 } else {
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530220 qdf_mem_copy(&local_mac_addr_aligned.raw[0],
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800221 peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
222 mac_addr = &local_mac_addr_aligned;
223 }
224 index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530225 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800226 TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
227 if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
228 0 && (check_valid == 0 || peer->valid)) {
Mohit Khannab04dfcd2017-02-13 18:54:35 -0800229 /* found it */
Mohit Khannab7bec722017-11-10 11:43:44 -0800230 ol_txrx_peer_get_ref(peer, dbg_id);
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530231 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800232 return peer;
233 }
234 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530235 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800236 return NULL; /* failure */
237}
238
239void
240ol_txrx_peer_find_hash_remove(struct ol_txrx_pdev_t *pdev,
241 struct ol_txrx_peer_t *peer)
242{
Yun Park512f3a12017-04-08 10:13:04 -0700243 unsigned int index;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800244
245 index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
246 /*
247 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
248 * by the caller.
249 * The caller needs to hold the lock from the time the peer object's
250 * reference count is decremented and tested up through the time the
251 * reference to the peer object is removed from the hash table, by
252 * this function.
253 * Holding the lock only while removing the peer object reference
254 * from the hash table keeps the hash table consistent, but does not
255 * protect against a new HL tx context starting to use the peer object
256 * if it looks up the peer object from its MAC address just after the
257 * peer ref count is decremented to zero, but just before the peer
258 * object reference is removed from the hash table.
259 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530260 /* qdf_spin_lock_bh(&pdev->peer_ref_mutex); */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800261 TAILQ_REMOVE(&pdev->peer_hash.bins[index], peer, hash_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530262 /* qdf_spin_unlock_bh(&pdev->peer_ref_mutex); */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800263}
264
/**
 * ol_txrx_peer_find_hash_erase() - delete every peer left in the hash table
 * @pdev: pdev being torn down; assumed no longer in active use
 *
 * Walks all hash bins and forces each remaining peer's reference count to
 * exactly one, then releases that single reference so the peer object is
 * deleted by ol_txrx_peer_release_ref().
 */
void ol_txrx_peer_find_hash_erase(struct ol_txrx_pdev_t *pdev)
{
	unsigned int i;
	/*
	 * Not really necessary to take peer_ref_mutex lock - by this point,
	 * it's known that the pdev is no longer in use.
	 */

	for (i = 0; i <= pdev->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
			struct ol_txrx_peer_t *peer, *peer_next;

			/*
			 * TAILQ_FOREACH_SAFE must be used here to avoid any
			 * memory access violation after peer is freed
			 */
			TAILQ_FOREACH_SAFE(peer, &pdev->peer_hash.bins[i],
					   hash_list_elem, peer_next) {
				/*
				 * Don't remove the peer from the hash table -
				 * that would modify the list we are currently
				 * traversing,
				 * and it's not necessary anyway.
				 */
				/*
				 * Artificially adjust the peer's ref count to
				 * 1 (init to 0, then take one reference), so
				 * it will get deleted by the
				 * ol_txrx_peer_release_ref that follows.
				 */
				qdf_atomic_init(&peer->ref_cnt); /* set to 0 */
				ol_txrx_peer_get_ref(peer,
						     PEER_DEBUG_ID_OL_HASH_ERS);
				ol_txrx_peer_release_ref(peer,
							 PEER_DEBUG_ID_OL_HASH_ERS);
			}
		}
	}
}
303
304/*=== function definitions for peer id --> peer object map ==================*/
305
306static int ol_txrx_peer_find_map_attach(struct ol_txrx_pdev_t *pdev)
307{
308 int max_peers, peer_map_size;
309
310 /* allocate the peer ID -> peer object map */
311 max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
312 peer_map_size = max_peers * sizeof(pdev->peer_id_to_obj_map[0]);
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530313 pdev->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800314 if (!pdev->peer_id_to_obj_map)
315 return 1; /* failure */
316
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800317 return 0; /* success */
318}
319
/* Free the peer ID -> peer object map allocated by map_attach. */
static void ol_txrx_peer_find_map_detach(struct ol_txrx_pdev_t *pdev)
{
	qdf_mem_free(pdev->peer_id_to_obj_map);
}
324
Deepak Dhamdhereb0d2dda2017-04-03 01:01:50 -0700325/**
326 * ol_txrx_peer_clear_map_peer() - Remove map entries that refer to a peer.
327 * @pdev: pdev handle
328 * @peer: peer for removing obj map entries
329 *
330 * Run through the entire peer_id_to_obj map and nullify all the entries
331 * that map to a particular peer. Called before deleting the peer object.
332 *
333 * Return: None
334 */
335void ol_txrx_peer_clear_map_peer(ol_txrx_pdev_handle pdev,
336 struct ol_txrx_peer_t *peer)
337{
338 int max_peers;
339 int i;
340
341 max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
342
343 qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
344 for (i = 0; i < max_peers; i++) {
345 if (pdev->peer_id_to_obj_map[i].peer == peer) {
346 /* Found a map entry for this peer, clear it. */
347 pdev->peer_id_to_obj_map[i].peer = NULL;
348 }
349 }
350 qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
351}
352
/*
 * ol_txrx_peer_find_add_id() - Add peer_id entry to peer
 *
 * @pdev: Handle to pdev object
 * @peer_mac_addr: MAC address of peer provided by firmware
 * @peer_id: peer_id provided by firmware
 *
 * Search for peer object for the MAC address, add the peer_id to
 * its array of peer_id's and update the peer_id_to_obj map entry
 * for that peer_id. Increment corresponding reference counts.
 *
 * Riva/Pronto has one peer id for each peer.
 * Peregrine/Rome has two peer id for each peer.
 * iHelium has upto three peer id for each peer.
 *
 * Return: None
 */
static inline void ol_txrx_peer_find_add_id(struct ol_txrx_pdev_t *pdev,
				uint8_t *peer_mac_addr, uint16_t peer_id)
{
	struct ol_txrx_peer_t *peer;
	int status;
	int i;
	uint32_t peer_id_ref_cnt;
	uint32_t peer_ref_cnt;

	/* check if there's already a peer object with this MAC address */
	peer =
		ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac_addr,
						    1 /* is aligned */, 0,
						    PEER_DEBUG_ID_OL_PEER_MAP);

	if (!peer || peer_id == HTT_INVALID_PEER) {
		/*
		 * Currently peer IDs are assigned for vdevs as well as peers.
		 * If the peer ID is for a vdev, then we will fail to find a
		 * peer with a matching MAC address.
		 */
		ol_txrx_err("peer not found or peer ID is %d invalid",
			    peer_id);
		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
				    DEBUG_PEER_MAP_EVENT,
				    peer_id, peer_mac_addr,
				    peer, 0, 0);

		return;
	}

	qdf_spin_lock(&pdev->peer_map_unmap_lock);

	/* peer's ref count was already incremented by
	 * peer_find_hash_find; that reference is kept to back the new
	 * peer_id -> peer map entry.
	 */
	if (!pdev->peer_id_to_obj_map[peer_id].peer) {
		/* first mapping for this peer_id: install the peer and
		 * start its per-id reference count at zero
		 */
		pdev->peer_id_to_obj_map[peer_id].peer = peer;
		qdf_atomic_init
		  (&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
	}
	qdf_atomic_inc
		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);

	/* status == 1 means no free slot was found below */
	status = 1;

	/* find a place in peer_id array and insert peer_id */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
			peer->peer_ids[i] = peer_id;
			status = 0;
			break;
		}
	}

	/* the firmware map event confirms peer creation completed */
	if (qdf_atomic_read(&peer->fw_create_pending) == 1) {
		qdf_atomic_set(&peer->fw_create_pending, 0);
	}

	qdf_spin_unlock(&pdev->peer_map_unmap_lock);

	/* snapshot counts (outside the lock) for tracing only */
	peer_id_ref_cnt = qdf_atomic_read(&pdev->
				peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
	peer_ref_cnt = qdf_atomic_read(&peer->ref_cnt);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: peer %pK ID %d peer_id[%d] peer_id_ref_cnt %d peer->ref_cnt %d",
		  __func__, peer, peer_id, i, peer_id_ref_cnt, peer_ref_cnt);
	wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
			    DEBUG_PEER_MAP_EVENT,
			    peer_id, &peer->mac_addr.raw, peer,
			    peer_id_ref_cnt,
			    peer_ref_cnt);


	if (status) {
		/* TBDXXX: assert for now */
		qdf_assert(0);
	}
}
449
450/*=== allocation / deallocation function definitions ========================*/
451
/**
 * ol_txrx_peer_find_attach() - set up both peer-lookup structures
 * @pdev: pdev to attach the id map and MAC hash table to
 *
 * Return: 0 on success, 1 on failure (nothing left allocated)
 */
int ol_txrx_peer_find_attach(struct ol_txrx_pdev_t *pdev)
{
	if (ol_txrx_peer_find_map_attach(pdev))
		return 1;

	if (ol_txrx_peer_find_hash_attach(pdev)) {
		/* roll back the map allocation on hash-table failure */
		ol_txrx_peer_find_map_detach(pdev);
		return 1;
	}

	return 0; /* success */
}
462
/* Tear down both structures created by ol_txrx_peer_find_attach. */
void ol_txrx_peer_find_detach(struct ol_txrx_pdev_t *pdev)
{
	ol_txrx_peer_find_map_detach(pdev);
	ol_txrx_peer_find_hash_detach(pdev);
}
468
469/*=== function definitions for message handling =============================*/
470
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530471#if defined(CONFIG_HL_SUPPORT)
472
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800473void
474ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
475 uint16_t peer_id,
476 uint8_t vdev_id, uint8_t *peer_mac_addr, int tx_ready)
477{
478 ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530479 if (!tx_ready) {
480 struct ol_txrx_peer_t *peer;
Yun Park512f3a12017-04-08 10:13:04 -0700481
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530482 peer = ol_txrx_peer_find_by_id(pdev, peer_id);
483 if (!peer) {
484 /* ol_txrx_peer_detach called before peer map arrived*/
485 return;
486 } else {
487 if (tx_ready) {
488 int i;
Yun Park512f3a12017-04-08 10:13:04 -0700489
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530490 /* unpause all tx queues now, since the
491 * target is ready
492 */
493 for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs);
494 i++)
495 ol_txrx_peer_tid_unpause(peer, i);
496
497 } else {
498 /* walk through paused mgmt queue,
499 * update tx descriptors
500 */
501 ol_tx_queue_decs_reinit(peer, peer_id);
502
503 /* keep non-mgmt tx queues paused until assoc
504 * is finished tx queues were paused in
Yun Park512f3a12017-04-08 10:13:04 -0700505 * ol_txrx_peer_attach
506 */
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530507 /* unpause tx mgmt queue */
508 ol_txrx_peer_tid_unpause(peer,
509 HTT_TX_EXT_TID_MGMT);
510 }
511 }
512 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800513}
514
515void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
516{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530517 struct ol_txrx_peer_t *peer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -0700518
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530519 peer = ol_txrx_peer_find_by_id(pdev, peer_id);
520 if (peer) {
521 int i;
522 /*
523 * Unpause all data tx queues now that the target is ready.
524 * The mgmt tx queue was not paused, so skip it.
525 */
526 for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++) {
527 if (i == HTT_TX_EXT_TID_MGMT)
528 continue; /* mgmt tx queue was not paused */
529
530 ol_txrx_peer_tid_unpause(peer, i);
531 }
532 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800533}
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530534#else
535
/*
 * Non-HL variant of the peer map handler: just record the
 * peer_id -> peer mapping. vdev_id and tx_ready are unused in this
 * configuration.
 */
void
ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
		       uint16_t peer_id,
		       uint8_t vdev_id,
		       uint8_t *peer_mac_addr,
		       int tx_ready)
{
	ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
}
545
/* Intentional no-op in the non-HL configuration. */
void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
}
549
550#endif
551
/*
 * ol_rx_peer_unmap_handler() - Handle peer unmap event from firmware
 *
 * @pdev: Handle to pdev pbject
 * @peer_id: peer_id unmapped by firmware
 *
 * Decrement reference count for the peer_id in peer_id_to_obj_map,
 * decrement reference count in corresponding peer object and clear the entry
 * in peer's peer_ids array.
 * In case of unmap events for a peer that is already deleted, just decrement
 * del_peer_id_ref_cnt.
 *
 * Return: None
 */
void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
	struct ol_txrx_peer_t *peer;
	int i = 0;
	int32_t ref_cnt;

	if (peer_id == HTT_INVALID_PEER) {
		ol_txrx_err(
		   "invalid peer ID %d\n", peer_id);
		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
				    DEBUG_PEER_UNMAP_EVENT,
				    peer_id, NULL, NULL, 0, 0x100);
		return;
	}

	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);

	if (qdf_atomic_read(
		&pdev->peer_id_to_obj_map[peer_id].del_peer_id_ref_cnt)) {
		/* This peer_id belongs to a peer already deleted:
		 * account the unmap against del_peer_id_ref_cnt (set up by
		 * ol_txrx_peer_remove_obj_map_entries) instead.
		 */
		qdf_atomic_dec(&pdev->peer_id_to_obj_map[peer_id].
					del_peer_id_ref_cnt);
		ref_cnt = qdf_atomic_read(&pdev->peer_id_to_obj_map[peer_id].
							del_peer_id_ref_cnt);
		qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
				    DEBUG_PEER_UNMAP_EVENT,
				    peer_id, NULL, NULL, ref_cnt, 0x101);
		ol_txrx_dbg("peer already deleted, peer_id %d del_peer_id_ref_cnt %d",
			    peer_id, ref_cnt);
		return;
	}
	peer = pdev->peer_id_to_obj_map[peer_id].peer;

	if (peer == NULL) {
		/*
		 * Currently peer IDs are assigned for vdevs as well as peers.
		 * If the peer ID is for a vdev, then the peer pointer stored
		 * in peer_id_to_obj_map will be NULL.
		 */
		qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
		ol_txrx_info("peer not found for peer_id %d", peer_id);
		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
				    DEBUG_PEER_UNMAP_EVENT,
				    peer_id, NULL, NULL, 0, 0x102);
		return;
	}

	/* last unmap for this peer_id: clear the map slot and the
	 * matching entry in the peer's own peer_ids array
	 */
	if (qdf_atomic_dec_and_test
		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt)) {
		pdev->peer_id_to_obj_map[peer_id].peer = NULL;
		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
			if (peer->peer_ids[i] == peer_id) {
				peer->peer_ids[i] = HTT_INVALID_PEER;
				break;
			}
		}
	}

	ref_cnt = qdf_atomic_read
		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);

	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);

	wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
			    DEBUG_PEER_UNMAP_EVENT,
			    peer_id, &peer->mac_addr.raw, peer, ref_cnt,
			    qdf_atomic_read(&peer->ref_cnt));

	/*
	 * Remove a reference to the peer.
	 * If there are no more references, delete the peer object.
	 */
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_MAP);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: peer_id %d peer %pK peer_id_ref_cnt %d",
		  __func__, peer_id, peer, ref_cnt);
}
645
/**
 * ol_txrx_peer_remove_obj_map_entries() - Remove matching pdev peer map entries
 * @pdev: pdev handle
 * @peer: peer for removing obj map entries
 *
 * Saves peer_id_ref_cnt to a different field and removes the link
 * to peer object. It also decrements the peer reference count by
 * the number of references removed.
 *
 * Return: None
 */
void ol_txrx_peer_remove_obj_map_entries(ol_txrx_pdev_handle pdev,
					struct ol_txrx_peer_t *peer)
{
	int i;
	uint16_t peer_id;
	int32_t peer_id_ref_cnt;
	int32_t num_deleted_maps = 0;
	/* snapshots taken under the lock, printed after it is dropped */
	uint16_t save_peer_ids[MAX_NUM_PEER_ID_PER_PEER];
	uint16_t save_peer_id_ref_cnt[MAX_NUM_PEER_ID_PER_PEER] = {0};

	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		peer_id = peer->peer_ids[i];
		save_peer_ids[i] = HTT_INVALID_PEER;
		if (peer_id == HTT_INVALID_PEER ||
		    pdev->peer_id_to_obj_map[peer_id].peer == NULL) {
			/* unused peer_id, or object is already dereferenced */
			continue;
		}
		if (pdev->peer_id_to_obj_map[peer_id].peer != peer) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("peer pointer mismatch in peer_id_to_obj"));
			continue;
		}
		peer_id_ref_cnt = qdf_atomic_read(
					&pdev->peer_id_to_obj_map[peer_id].
						peer_id_ref_cnt);
		save_peer_ids[i] = peer_id;
		save_peer_id_ref_cnt[i] = peer_id_ref_cnt;

		/*
		 * Transfer peer_id_ref_cnt into del_peer_id_ref_cnt so that
		 * ol_txrx_peer_release_ref will decrement del_peer_id_ref_cnt
		 * and any map events will increment peer_id_ref_cnt. Otherwise
		 * accounting will be messed up.
		 *
		 * Add operation will ensure that back to back roaming in the
		 * middle of unmap/map event sequence will be accounted for.
		 */
		qdf_atomic_add(peer_id_ref_cnt,
			&pdev->peer_id_to_obj_map[peer_id].del_peer_id_ref_cnt);
		qdf_atomic_init(&pdev->peer_id_to_obj_map[peer_id].
				peer_id_ref_cnt);
		num_deleted_maps += peer_id_ref_cnt;
		pdev->peer_id_to_obj_map[peer_id].peer = NULL;
		peer->peer_ids[i] = HTT_INVALID_PEER;
	}
	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);

	/* Debug print the information after releasing bh spinlock */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (save_peer_ids[i] == HTT_INVALID_PEER)
			continue;
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
			  FL("peer_id = %d, peer_id_ref_cnt = %d, index = %d"),
			  save_peer_ids[i], save_peer_id_ref_cnt[i], i);
	}

	/* sanity: cannot release more references than the peer holds */
	if (num_deleted_maps > qdf_atomic_read(&peer->ref_cnt)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("num_deleted_maps %d ref_cnt %d"),
			  num_deleted_maps, qdf_atomic_read(&peer->ref_cnt));
		QDF_BUG(0);
		return;
	}

	/* drop one peer reference for every removed map entry */
	while (num_deleted_maps-- > 0)
		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_MAP);
}
727
728struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev)
729{
730 struct ol_txrx_peer_t *peer;
731
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530732 qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800733 /*
734 * Check the TXRX Peer is itself valid And also
735 * if HTT Peer ID has been setup for this peer
736 */
737 if (vdev->last_real_peer
738 && vdev->last_real_peer->peer_ids[0] != HTT_INVALID_PEER_ID) {
Frank Liu4362e462018-01-16 11:51:55 +0800739 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Mohit Khannab7bec722017-11-10 11:43:44 -0800740 ol_txrx_peer_get_ref(vdev->last_real_peer,
741 PEER_DEBUG_ID_OL_INTERNAL);
Frank Liu4362e462018-01-16 11:51:55 +0800742 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800743 peer = vdev->last_real_peer;
744 } else {
745 peer = NULL;
746 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530747 qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800748 return peer;
749}
750
Deepak Dhamdhereb0d2dda2017-04-03 01:01:50 -0700751
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800752/*=== function definitions for debug ========================================*/
753
754#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
/**
 * ol_txrx_peer_find_display() - debug-dump both peer lookup structures
 * @pdev: pdev whose peer map and hash table are printed
 * @indent: number of leading spaces for each output line
 *
 * Prints every non-NULL peer_id_to_obj_map entry, then every peer in
 * every hash bin with its MAC address.
 */
void ol_txrx_peer_find_display(ol_txrx_pdev_handle pdev, int indent)
{
	int i, max_peers;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "%*speer map:\n", indent, " ");
	max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
	for (i = 0; i < max_peers; i++) {
		if (pdev->peer_id_to_obj_map[i].peer) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "%*sid %d -> %pK\n",
				  indent + 4, " ", i,
				  pdev->peer_id_to_obj_map[i].peer);
		}
	}
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "%*speer hash table:\n", indent, " ");
	for (i = 0; i <= pdev->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
			struct ol_txrx_peer_t *peer;

			TAILQ_FOREACH(peer, &pdev->peer_hash.bins[i],
				      hash_list_elem) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO_LOW,
					  "%*shash idx %d -> %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
					  indent + 4, " ", i, peer,
					  peer->mac_addr.raw[0],
					  peer->mac_addr.raw[1],
					  peer->mac_addr.raw[2],
					  peer->mac_addr.raw[3],
					  peer->mac_addr.raw[4],
					  peer->mac_addr.raw[5]);
			}
		}
	}
}
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -0700792
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800793#endif /* if TXRX_DEBUG_LEVEL */