/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include "dp_rx_defrag.h"
#include "dp_rx.h"
#include <hal_api.h>
#include <hal_reo.h>
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#endif

static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid)
{
	params->u.upd_queue_params.update_svld = 1;
	params->u.upd_queue_params.svld = valid;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Setting SSN valid bit to %d",
		  __func__, valid);
}

static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	/*
	 * Intentionally use & rather than &&:
	 * because the operands are binary rather than generic boolean,
	 * the functionality is equivalent.
	 * Using && has the advantage of short-circuited evaluation,
	 * but using & has the advantage of no conditional branching,
	 * which is a more significant benefit.
	 */
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}
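
/*
 * Illustrative note (not part of the driver): both equality tests above
 * evaluate to 0 or 1, so for two identical addresses (1 & 1) == 1 and the
 * function returns 0 ("match"), while any mismatch yields (x & 0) == 0 and
 * a non-zero return. A hypothetical caller therefore tests for equality as:
 *
 *	if (dp_peer_find_mac_addr_cmp(&a, &b) == 0)
 *		;	// addresses match
 */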

static int dp_peer_ast_table_attach(struct dp_soc *soc)
{
	uint32_t max_ast_index;

	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
	/* allocate ast_table for ast entry to ast_index map */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "\n<=== cfg max ast idx %d ====>", max_ast_index);
	soc->ast_table = qdf_mem_malloc(max_ast_index *
					sizeof(struct dp_ast_entry *));
	if (!soc->ast_table) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: ast_table memory allocation failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}
	return 0; /* success */
}

static int dp_peer_find_map_attach(struct dp_soc *soc)
{
	uint32_t max_peers, peer_map_size;

	max_peers = soc->max_peers;
	/* allocate the peer ID -> peer object map */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "\n<=== cfg max peer id %d ====>", max_peers);
	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!soc->peer_id_to_obj_map) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: peer map memory allocation failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * The peer_id_to_obj_map doesn't really need to be initialized,
	 * since elements are only used after they have been individually
	 * initialized.
	 * However, it is convenient for debugging to have all elements
	 * that are not in use set to 0.
	 */
	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
	return 0; /* success */
}

static int dp_log2_ceil(unsigned int value)
{
	unsigned int tmp = value;
	int log2 = -1;

	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	if (1 << log2 != value)
		log2++;
	return log2;
}
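
/*
 * Illustrative values (not part of the driver): dp_log2_ceil() returns the
 * smallest exponent n such that (1 << n) >= value, i.e.
 *
 *	dp_log2_ceil(1) == 0
 *	dp_log2_ceil(2) == 1
 *	dp_log2_ceil(5) == 3	(2^3 = 8 is the next power of two)
 *	dp_log2_ceil(8) == 3	(already a power of two, no round-up)
 *
 * The hash attach routines below rely on this to size their bin arrays to a
 * power of two, so a bit-mask can replace a modulo operation on lookups.
 */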

static int dp_peer_find_add_id_to_obj(
	struct dp_peer *peer,
	uint16_t peer_id)
{
	int i;

	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
			peer->peer_ids[i] = peer_id;
			return 0; /* success */
		}
	}
	return QDF_STATUS_E_FAILURE; /* failure */
}

#define DP_PEER_HASH_LOAD_MULT 2
#define DP_PEER_HASH_LOAD_SHIFT 0

#define DP_AST_HASH_LOAD_MULT 2
#define DP_AST_HASH_LOAD_SHIFT 0

static int dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = soc->max_peers;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	return 0;
}
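
/*
 * Worked example (illustrative, assuming soc->max_peers == 1024):
 * hash_elems = 1024 * DP_PEER_HASH_LOAD_MULT = 2048, the load shift is zero,
 * and dp_log2_ceil(2048) == 11, so 2048 bins are allocated with
 * peer_hash.mask == 0x7ff and peer_hash.idx_bits == 11. Every lookup can
 * then reduce a hash value with "index & mask" instead of a divide.
 */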

static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	if (soc->peer_hash.bins) {
		qdf_mem_free(soc->peer_hash.bins);
		soc->peer_hash.bins = NULL;
	}
}

static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	unsigned index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->peer_hash.idx_bits;
	index &= soc->peer_hash.mask;
	return index;
}
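
/*
 * Illustrative trace (hypothetical halfword values, not from the driver):
 * with bytes_ab = 0x1234, bytes_cd = 0x5678, bytes_ef = 0x9abc, the XOR fold
 * gives 0x1234 ^ 0x5678 ^ 0x9abc = 0xdef0. With idx_bits == 11 and
 * mask == 0x7ff, 0xdef0 ^ (0xdef0 >> 11) = 0xdeeb and 0xdeeb & 0x7ff = 0x6eb
 * is the bin index. Folding the high bits back in before masking keeps them
 * from being discarded outright.
 */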

void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/*
	 * It is important to add the new peer at the tail of the peer list
	 * with the bin index. Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}
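
/*
 * Example of the invariant above (illustrative): if peer A and later a
 * duplicate peer B are hashed in with the same MAC address,
 * dp_peer_find_hash_find() walks the bin head-to-tail and keeps returning A,
 * the entry added first, until A is removed from the hash table.
 */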

#ifdef FEATURE_AST
/*
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */
static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;
	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);

	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
		      DP_AST_HASH_LOAD_SHIFT);

	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->ast_hash.mask = hash_elems - 1;
	soc->ast_hash.idx_bits = log2;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "ast hash_elems: %d, max_ast_idx: %d",
		  hash_elems, max_ast_idx);

	/* allocate an array of TAILQ peer object lists */
	soc->ast_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
						dp_ast_entry)));

	if (!soc->ast_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->ast_hash.bins[i]);

	return 0;
}

/*
 * dp_peer_ast_cleanup() - cleanup the references
 * @soc: SoC handle
 * @ast: ast entry
 *
 * Return: None
 */
static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
				       struct dp_ast_entry *ast)
{
	txrx_ast_free_cb cb = ast->callback;
	void *cookie = ast->cookie;

	/* Call the callbacks to free up the cookie */
	if (cb) {
		ast->callback = NULL;
		ast->cookie = NULL;
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
}

/*
 * dp_peer_ast_hash_detach() - Free AST Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
	unsigned int index;
	struct dp_ast_entry *ast, *ast_next;

	if (!soc->ast_hash.mask)
		return;

	if (!soc->ast_hash.bins)
		return;

	qdf_spin_lock_bh(&soc->ast_lock);
	for (index = 0; index <= soc->ast_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
					   hash_list_elem, ast_next) {
				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
					     hash_list_elem);
				dp_peer_ast_cleanup(soc, ast);
				qdf_mem_free(ast);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);

	qdf_mem_free(soc->ast_hash.bins);
	soc->ast_hash.bins = NULL;
}

/*
 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address
 *
 * Return: AST hash
 */
static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
					      union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->ast_hash.idx_bits;
	index &= soc->ast_hash.mask;
	return index;
}

/*
 * dp_peer_ast_hash_add() - Add AST entry into hash table
 * @soc: SoC handle
 * @ase: AST entry
 *
 * This function adds the AST entry into SoC AST hash table
 * It assumes caller has taken the ast lock to protect the access to this table
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
					struct dp_ast_entry *ase)
{
	uint32_t index;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
 * @soc: SoC handle
 * @ase: AST entry
 *
 * This function removes the AST entry from soc AST hash table
 * It assumes caller has taken the ast lock to protect the access to this table
 *
 * Return: None
 */
void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase)
{
	unsigned index;
	struct dp_ast_entry *tmpase;
	int found = 0;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	/* Check if tail is not empty before delete */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));

	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (tmpase == ase) {
			found = 1;
			break;
		}
	}

	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
 * @soc: SoC handle
 * @peer: peer handle
 * @ast_mac_addr: mac address
 *
 * It assumes caller has taken the ast lock to protect the access to ast list
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
					   struct dp_peer *peer,
					   uint8_t *ast_mac_addr)
{
	struct dp_ast_entry *ast_entry = NULL;
	union dp_align_mac_addr *mac_addr =
		(union dp_align_mac_addr *)ast_mac_addr;

	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
		if (!dp_peer_find_mac_addr_cmp(mac_addr,
					       &ast_entry->mac_addr)) {
			return ast_entry;
		}
	}

	return NULL;
}

/*
 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
 * @soc: SoC handle
 * @ast_mac_addr: MAC address
 * @pdev_id: pdev id
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if ((pdev_id == ase->pdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
 * @soc: SoC handle
 * @ast_mac_addr: MAC address
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id of the VAP to which the peer belongs
 * @ast_hash: ast hash value in HW
 *
 * Return: None
 */
static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id, uint16_t ast_hash)
{
	struct dp_ast_entry *ast_entry = NULL;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;

	if (!peer)
		return;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: peer %pK ID %d vid %d mac %pM",
		  __func__, peer, hw_peer_id, vdev_id, mac_addr);

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);

	if (ast_entry) {
		ast_entry->ast_idx = hw_peer_id;
		soc->ast_table[hw_peer_id] = ast_entry;
		ast_entry->is_active = TRUE;
		peer_type = ast_entry->type;
		ast_entry->ast_hash_value = ast_hash;
		ast_entry->is_mapped = TRUE;
	}

	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
				soc->ctrl_psoc, peer->peer_ids[0],
				hw_peer_id, vdev_id,
				mac_addr, peer_type, ast_hash);
		}
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "AST entry not found");
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return;
}

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status)
{
	struct dp_ast_free_cb_params *param =
		(struct dp_ast_free_cb_params *)cookie;
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_peer *peer = NULL;

	if (status != CDP_TXRX_AST_DELETED) {
		qdf_mem_free(cookie);
		return;
	}

	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
				      0, param->vdev_id);
	if (peer) {
		dp_peer_add_ast(soc, peer,
				&param->mac_addr.raw[0],
				param->type,
				param->flags);
		dp_peer_unref_delete(peer);
	}
	qdf_mem_free(cookie);
}

/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: AST entry type
 * @flags: AST configuration flags
 *
 * This API is used by the WDS source port learning function to
 * add a new AST entry into the peer AST list.
 *
 * Return: 0 if new entry is allocated,
 *         -1 if entry add failed
 */
int dp_peer_add_ast(struct dp_soc *soc,
		    struct dp_peer *peer,
		    uint8_t *mac_addr,
		    enum cdp_txrx_ast_entry_type type,
		    uint32_t flags)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = NULL, *tmp_vdev = NULL;
	struct dp_pdev *pdev = NULL;
	uint8_t next_node_mac[6];
	int ret = -1;
	txrx_ast_free_cb cb = NULL;
	void *cookie = NULL;
	struct dp_peer *tmp_peer = NULL;
	bool is_peer_found = false;

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Peers vdev is NULL"));
		QDF_ASSERT(0);
		return ret;
	}

	pdev = vdev->pdev;

	tmp_peer = dp_peer_find_hash_find(soc, mac_addr, 0,
					  DP_VDEV_ALL);
	if (tmp_peer) {
		tmp_vdev = tmp_peer->vdev;
		if (!tmp_vdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Peers vdev is NULL"));
			QDF_ASSERT(0);
			dp_peer_unref_delete(tmp_peer);
			return ret;
		}
		if (tmp_vdev->pdev->pdev_id == pdev->pdev_id)
			is_peer_found = true;

		dp_peer_unref_delete(tmp_peer);
	}

	qdf_spin_lock_bh(&soc->ast_lock);
	if (peer->delete_in_progress) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return ret;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: pdevid: %u vdev: %u ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
		  peer->mac_addr.raw, peer, mac_addr);

	/* fw supports only 2 times the max_peers ast entries */
	if (soc->num_ast_entries >=
	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Max ast entries reached"));
		return ret;
	}

	/* If an AST entry already exists, just return from here.
	 * An AST entry with the same mac address can exist on different
	 * radios; if ast_override support is enabled, use the search by
	 * pdev in this case.
	 */
	if (soc->ast_override_support) {
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
							    pdev->pdev_id);
		if (ast_entry) {
			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
				ast_entry->is_active = TRUE;

			qdf_spin_unlock_bh(&soc->ast_lock);
			return 0;
		}
		if (is_peer_found) {
			/* During WDS to static roaming, peer is added
			 * to the list before static AST entry create.
			 * So, allow AST entry for STATIC type
			 * even if peer is present
			 */
			if (type != CDP_TXRX_AST_TYPE_STATIC) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return 0;
			}
		}
	} else {
		/* HMWDS_SEC entries can be added for the same mac address;
		 * do not check for an existing entry
		 */
		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
			goto add_ast_entry;

		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);

		if (ast_entry) {
			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
				ast_entry->is_active = TRUE;

			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
			    !ast_entry->delete_in_progress) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return 0;
			}

			/* An add for an HMWDS entry cannot be ignored if
			 * there is already an AST entry with the same mac
			 * address:
			 *
			 * if an ast entry exists with the requested mac
			 * address, send a delete command and register a
			 * callback which can take care of adding the HMWDS
			 * ast entry on delete confirmation from target
			 */
			if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
				struct dp_ast_free_cb_params *param = NULL;

				if (ast_entry->type ==
					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
					goto add_ast_entry;

				/* save existing callback */
				if (ast_entry->callback) {
					cb = ast_entry->callback;
					cookie = ast_entry->cookie;
				}

				param = qdf_mem_malloc(sizeof(*param));
				if (!param) {
					QDF_TRACE(QDF_MODULE_ID_TXRX,
						  QDF_TRACE_LEVEL_ERROR,
						  "Allocation failed");
					qdf_spin_unlock_bh(&soc->ast_lock);
					return ret;
				}

				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
					     QDF_MAC_ADDR_SIZE);
				qdf_mem_copy(&param->peer_mac_addr.raw[0],
					     &peer->mac_addr.raw[0],
					     QDF_MAC_ADDR_SIZE);
				param->type = type;
				param->flags = flags;
				param->vdev_id = vdev->vdev_id;
				ast_entry->callback = dp_peer_free_hmwds_cb;
				ast_entry->pdev_id = vdev->pdev->pdev_id;
				ast_entry->type = type;
				ast_entry->cookie = (void *)param;
				if (!ast_entry->delete_in_progress)
					dp_peer_del_ast(soc, ast_entry);
			}

			/* Modify an already existing AST entry from type
			 * WDS to MEC on promotion. This serves as a fix when
			 * backbone of interfaces are interchanged wherein
			 * wds entry becomes its own MEC. The entry should be
			 * replaced only when the ast_entry peer matches the
			 * peer received in mec event. This additional check
			 * is needed in wds repeater cases where a multicast
			 * packet from station to the root via the repeater
			 * should not remove the wds entry.
			 */
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
			    (type == CDP_TXRX_AST_TYPE_MEC) &&
			    (ast_entry->peer == peer)) {
				ast_entry->is_active = FALSE;
				dp_peer_del_ast(soc, ast_entry);
			}
			qdf_spin_unlock_bh(&soc->ast_lock);

			/* Call the saved callback */
			if (cb) {
				cb(soc->ctrl_psoc,
				   dp_soc_to_cdp_soc(soc),
				   cookie,
				   CDP_TXRX_AST_DELETE_IN_PROGRESS);
			}
			return 0;
		}
	}

add_ast_entry:
	ast_entry = (struct dp_ast_entry *)
			qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("fail to allocate ast_entry"));
		QDF_ASSERT(0);
		return ret;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_SELF:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
		break;
	case CDP_TXRX_AST_TYPE_MEC:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
		break;
	case CDP_TXRX_AST_TYPE_DA:
		peer = peer->vdev->vap_bss_peer;
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Incorrect AST entry type"));
	}

	ast_entry->is_active = TRUE;
	DP_STATS_INC(soc, ast.added, 1);
	soc->num_ast_entries++;
	dp_peer_ast_hash_add(soc, ast_entry);

	ast_entry->peer = peer;

	if (type == CDP_TXRX_AST_TYPE_MEC)
		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
	else
		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);

	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
		if (QDF_STATUS_SUCCESS ==
				soc->cdp_soc.ol_ops->peer_add_wds_entry(
						soc->ctrl_psoc,
						peer->vdev->vdev_id,
						peer->mac_addr.raw,
						mac_addr,
						next_node_mac,
						flags,
						ast_entry->type)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return 0;
		}
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return ret;
}
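
/*
 * Illustrative flow (not part of the driver) of the HMWDS replace path in
 * dp_peer_add_ast() above: when a CDP_TXRX_AST_TYPE_WDS_HM add finds an
 * existing entry, the old entry is deleted first and the add is replayed
 * from the registered free callback, e.g.
 *
 *	dp_peer_add_ast(soc, peer, mac, CDP_TXRX_AST_TYPE_WDS_HM, flags)
 *	  -> existing entry found: cookie = {mac, peer_mac, type, flags}
 *	  -> dp_peer_del_ast(soc, old_entry)      // delete sent to target
 *	  ... target confirms the delete ...
 *	  -> dp_peer_free_hmwds_cb(..., cookie, CDP_TXRX_AST_DELETED)
 *	       -> dp_peer_add_ast(...)            // HMWDS entry re-added
 */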

/*
 * dp_peer_free_ast_entry() - Free up the ast entry memory
 * @soc: SoC handle
 * @ast_entry: Address search entry
 *
 * This API is used to free up the memory associated with
 * AST entry.
 *
 * Return: None
 */
void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry)
{
	/*
	 * NOTE: Ensure that call to this API is done
	 * after soc->ast_lock is taken
	 */
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);
	dp_peer_ast_cleanup(soc, ast_entry);
	qdf_mem_free(ast_entry);
	soc->num_ast_entries--;
}

/*
 * dp_peer_unlink_ast_entry() - Unlink AST entry from the peer list
 * @soc: SoC handle
 * @ast_entry: Address search entry
 *
 * This API is used to remove/unlink the AST entry from the peer list
 * and to clear its mapping in the soc AST table.
 *
 * Return: None
 */
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry)
{
	/*
	 * NOTE: Ensure that call to this API is done
	 * after soc->ast_lock is taken
	 */
	struct dp_peer *peer = ast_entry->peer;

	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);

	if (ast_entry == peer->self_ast_entry)
		peer->self_ast_entry = NULL;

	/*
	 * release the reference only if it is mapped
	 * to ast_table
	 */
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	ast_entry->peer = NULL;
}

/*
 * dp_peer_del_ast() - Delete and free AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function removes the AST entry from peer and soc tables
 * It assumes caller has taken the ast lock to protect the access to these
 * tables
 *
 * Return: None
 */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer;

	if (!ast_entry)
		return;

	if (ast_entry->delete_in_progress)
		return;

	ast_entry->delete_in_progress = true;

	peer = ast_entry->peer;
	dp_peer_ast_send_wds_del(soc, ast_entry);

	/* Remove SELF and STATIC entries in teardown itself */
	if (!ast_entry->next_hop)
		dp_peer_unlink_ast_entry(soc, ast_entry);

	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	/* If peer map v2 is enabled, we are not freeing the ast entry
	 * here; it is supposed to be freed in the unmap event (after
	 * we receive the delete confirmation from target).
	 *
	 * If peer_id is invalid, we did not get the peer map event
	 * for the peer; only in this case is the ast entry freed from here.
	 */

	/* For HM_SEC and SELF type we do not receive the unmap event;
	 * free the ast_entry from here itself
	 */
	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
		return;

	/* For a WDS secondary entry ast_entry->next_hop would be set,
	 * so unlinking has to be done explicitly here.
	 * As this entry is not a mapped entry, an unmap notification from
	 * FW will not come. Hence unlinking is done right here.
	 */
	if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
		dp_peer_unlink_ast_entry(soc, ast_entry);

	dp_peer_free_ast_entry(soc, ast_entry);
}
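
/*
 * Lifecycle sketch (illustrative, not part of the driver): for a mapped WDS
 * entry, dp_peer_del_ast() only marks delete_in_progress and sends the WDS
 * delete to target; the final dp_peer_unlink_ast_entry() +
 * dp_peer_free_ast_entry() happen later, when the AST unmap event confirms
 * the delete. Only HM_SEC and SELF entries, which never get an unmap event,
 * are unlinked and freed synchronously at the bottom of the function.
 */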

/*
 * dp_peer_update_ast() - Update the AST entry to the roamed peer
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @ast_entry: AST entry of the node
 * @flags: wds or hmwds
 *
 * This function updates the AST entry to the roamed peer and soc tables
 * It assumes caller has taken the ast lock to protect the access to these
 * tables
 *
 * Return: 0 if ast entry is updated successfully
 *         -1 failure
 */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	int ret = -1;
	struct dp_peer *old_peer;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
		  peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
		  peer->mac_addr.raw);

	/* Do not send AST update in below cases
	 * 1) Ast entry delete has already triggered
	 * 2) Peer delete is already triggered
	 * 3) We did not get the HTT map for create event
	 */
	if (ast_entry->delete_in_progress || peer->delete_in_progress ||
	    !ast_entry->is_mapped)
		return ret;

	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		return 0;

	/*
	 * Avoids flood of WMI update messages sent to FW for same peer.
	 */
	if (qdf_unlikely(ast_entry->peer == peer) &&
	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
	    (ast_entry->peer->vdev == peer->vdev) &&
	    (ast_entry->is_active))
		return 0;

	old_peer = ast_entry->peer;
	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);

	ast_entry->peer = peer;
	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
				soc->ctrl_psoc,
				peer->vdev->vdev_id,
				ast_entry->mac_addr.raw,
				peer->mac_addr.raw,
				flags);

	return ret;
}

/*
 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the pdev_id from the ast entry.
 *
 * Return: (uint8_t) pdev_id
 */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->pdev_id;
}

/*
 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the next hop from the ast entry.
 *
 * Return: (uint8_t) next_hop
 */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry)
{
	return ast_entry->next_hop;
}

/*
 * dp_peer_ast_set_type() - set type in the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 * @type: AST entry type to set
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
 */
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
	ast_entry->type = type;
}

#else
int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
		    uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		    uint32_t flags)
{
	return 1;
}

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
}

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	return 1;
}

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr)
{
	return NULL;
}

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	return NULL;
}

static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	return 0;
}

static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id, uint16_t ast_hash)
{
	return;
}

static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
}

void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
}

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry)
{
	return 0xff;
}

#endif

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = ast_entry->peer;
	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);

	if (ast_entry->next_hop) {
		cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
						    peer->vdev->vdev_id,
						    ast_entry->mac_addr.raw,
						    ast_entry->type);
	}
}

/**
 * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
 * @soc: soc handle
 * @peer: peer handle
 * @mac_addr: mac address of the AST entry to search and delete
 *
 * find the ast entry from the peer list using the mac address and free
 * the entry.
 *
 * Return: SUCCESS or NOENT
 */
static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t *mac_addr)
{
	struct dp_ast_entry *ast_entry;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;

	/*
	 * release the reference only if it is mapped
	 * to ast_table
	 */

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_NOENT;
	} else if (ast_entry->is_mapped) {
		soc->ast_table[ast_entry->ast_idx] = NULL;
	}

	cb = ast_entry->callback;
	cookie = ast_entry->cookie;

	dp_peer_unlink_ast_entry(soc, ast_entry);
	dp_peer_free_ast_entry(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETED);
	}

	return QDF_STATUS_SUCCESS;
}

struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_peer *peer;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    ((peer->vdev->vdev_id == vdev_id) ||
		     (vdev_id == DP_VDEV_ALL))) {
			/* found it - increment the ref count before releasing
			 * the lock
			 */
			qdf_atomic_inc(&peer->ref_cnt);
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return NULL; /* failure */
}

void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	/* Check if tail is not empty before delete */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
	/*
	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
	 * by the caller.
	 * The caller needs to hold the lock from the time the peer object's
	 * reference count is decremented and tested up through the time the
	 * reference to the peer object is removed from the hash table, by
	 * this function.
	 * Holding the lock only while removing the peer object reference
	 * from the hash table keeps the hash table consistent, but does not
	 * protect against a new HL tx context starting to use the peer object
	 * if it looks up the peer object from its MAC address just after the
	 * peer ref count is decremented to zero, but just before the peer
	 * object reference is removed from the hash table.
	 */
	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}
	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
}

void dp_peer_find_hash_erase(struct dp_soc *soc)
{
	int i;

	/*
	 * Not really necessary to take peer_ref_mutex lock - by this point,
	 * it's known that the soc is no longer in use.
	 */
	for (i = 0; i <= soc->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
			struct dp_peer *peer, *peer_next;

			/*
			 * TAILQ_FOREACH_SAFE must be used here to avoid any
			 * memory access violation after peer is freed
			 */
			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
					   hash_list_elem, peer_next) {
				/*
				 * Don't remove the peer from the hash table -
				 * that would modify the list we are currently
				 * traversing, and it's not necessary anyway.
				 */
				/*
				 * Artificially adjust the peer's ref count to
				 * 1, so it will get deleted by
				 * dp_peer_unref_delete.
				 */
				/* set to zero */
				qdf_atomic_init(&peer->ref_cnt);
				/* incr to one */
				qdf_atomic_inc(&peer->ref_cnt);
				dp_peer_unref_delete(peer);
			}
		}
	}
}
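
/*
 * Illustrative note (not part of the driver): the two-step reset above
 * forces each surviving peer's reference count to exactly one regardless of
 * its previous value, e.g. a peer with ref_cnt == 3 goes
 * 3 -> 0 (qdf_atomic_init) -> 1 (qdf_atomic_inc), so the following
 * dp_peer_unref_delete() drops it to 0 and frees the peer.
 */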

static void dp_peer_ast_table_detach(struct dp_soc *soc)
{
	if (soc->ast_table) {
		qdf_mem_free(soc->ast_table);
		soc->ast_table = NULL;
	}
}

static void dp_peer_find_map_detach(struct dp_soc *soc)
{
	if (soc->peer_id_to_obj_map) {
		qdf_mem_free(soc->peer_id_to_obj_map);
		soc->peer_id_to_obj_map = NULL;
	}
}

int dp_peer_find_attach(struct dp_soc *soc)
{
	if (dp_peer_find_map_attach(soc))
		return 1;

	if (dp_peer_find_hash_attach(soc)) {
		dp_peer_find_map_detach(soc);
		return 1;
	}

	if (dp_peer_ast_table_attach(soc)) {
		dp_peer_find_hash_detach(soc);
		dp_peer_find_map_detach(soc);
		return 1;
	}

	if (dp_peer_ast_hash_attach(soc)) {
		dp_peer_ast_table_detach(soc);
		dp_peer_find_hash_detach(soc);
		dp_peer_find_map_detach(soc);
		return 1;
	}

	return 0; /* success */
}

void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
		return;

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
			       queue_status->header.status, rx_tid->tid);
		return;
	}

	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
		       "ssn: %d\n"
		       "curr_idx : %d\n"
		       "pn_31_0 : %08x\n"
		       "pn_63_32 : %08x\n"
		       "pn_95_64 : %08x\n"
		       "pn_127_96 : %08x\n"
		       "last_rx_enq_tstamp : %08x\n"
		       "last_rx_deq_tstamp : %08x\n"
		       "rx_bitmap_31_0 : %08x\n"
		       "rx_bitmap_63_32 : %08x\n"
		       "rx_bitmap_95_64 : %08x\n"
		       "rx_bitmap_127_96 : %08x\n"
		       "rx_bitmap_159_128 : %08x\n"
		       "rx_bitmap_191_160 : %08x\n"
		       "rx_bitmap_223_192 : %08x\n"
		       "rx_bitmap_255_224 : %08x\n",
		       rx_tid->tid,
		       queue_status->ssn, queue_status->curr_idx,
		       queue_status->pn_31_0, queue_status->pn_63_32,
		       queue_status->pn_95_64, queue_status->pn_127_96,
		       queue_status->last_rx_enq_tstamp,
		       queue_status->last_rx_deq_tstamp,
		       queue_status->rx_bitmap_31_0,
		       queue_status->rx_bitmap_63_32,
		       queue_status->rx_bitmap_95_64,
		       queue_status->rx_bitmap_127_96,
		       queue_status->rx_bitmap_159_128,
		       queue_status->rx_bitmap_191_160,
		       queue_status->rx_bitmap_223_192,
		       queue_status->rx_bitmap_255_224);

	DP_PRINT_STATS(
		       "curr_mpdu_cnt : %d\n"
		       "curr_msdu_cnt : %d\n"
		       "fwd_timeout_cnt : %d\n"
		       "fwd_bar_cnt : %d\n"
		       "dup_cnt : %d\n"
		       "frms_in_order_cnt : %d\n"
		       "bar_rcvd_cnt : %d\n"
		       "mpdu_frms_cnt : %d\n"
		       "msdu_frms_cnt : %d\n"
		       "total_byte_cnt : %d\n"
		       "late_recv_mpdu_cnt : %d\n"
		       "win_jump_2k : %d\n"
		       "hole_cnt : %d\n",
		       queue_status->curr_mpdu_cnt,
		       queue_status->curr_msdu_cnt,
		       queue_status->fwd_timeout_cnt,
		       queue_status->fwd_bar_cnt,
		       queue_status->dup_cnt,
		       queue_status->frms_in_order_cnt,
		       queue_status->bar_rcvd_cnt,
		       queue_status->mpdu_frms_cnt,
		       queue_status->msdu_frms_cnt,
		       queue_status->total_cnt,
		       queue_status->late_recv_mpdu_cnt,
		       queue_status->win_jump_2k,
		       queue_status->hole_cnt);

	DP_PRINT_STATS("Addba Req : %d\n"
		       "Addba Resp : %d\n"
		       "Addba Resp success : %d\n"
		       "Addba Resp failed : %d\n"
		       "Delba Req received : %d\n"
		       "Delba Tx success : %d\n"
		       "Delba Tx Fail : %d\n"
		       "BA window size : %d\n"
		       "Pn size : %d\n",
		       rx_tid->num_of_addba_req,
		       rx_tid->num_of_addba_resp,
		       rx_tid->num_addba_rsp_success,
		       rx_tid->num_addba_rsp_failed,
		       rx_tid->num_of_delba_req,
		       rx_tid->delba_tx_success_cnt,
		       rx_tid->delba_tx_fail_cnt,
		       rx_tid->ba_win_size,
		       rx_tid->pn_size);
}
1459
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301460static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301461 uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1462 uint8_t vdev_id)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001463{
1464 struct dp_peer *peer;
1465
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05301466 QDF_ASSERT(peer_id <= soc->max_peers);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001467 /* check if there's already a peer object with this MAC address */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001468 peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1469 0 /* is aligned */, vdev_id);
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301470 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Shivani Sonia5707a42020-01-08 16:42:08 +05301471 "%s: peer %pK ID %d vid %d mac %pM",
1472 __func__, peer, peer_id, vdev_id, peer_mac_addr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001473
1474 if (peer) {
1475 /* peer's ref count was already incremented by
1476 * peer_find_hash_find
1477 */
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301478 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08001479 "%s: ref_cnt: %d", __func__,
1480 qdf_atomic_read(&peer->ref_cnt));
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301481 if (!soc->peer_id_to_obj_map[peer_id])
1482 soc->peer_id_to_obj_map[peer_id] = peer;
1483 else {
1484 /* Peer map event came for peer_id which
1485 * is already mapped, this is not expected
1486 */
1487 QDF_ASSERT(0);
1488 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001489
1490 if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1491 /* TBDXXX: assert for now */
1492 QDF_ASSERT(0);
1493 }
1494
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301495 return peer;
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301496 }
1497
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301498 return NULL;
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301499}
1500
1501/**
1502 * dp_rx_peer_map_handler() - handle peer map event from firmware
1503 * @soc_handle - generic soc handle
1504 * @peer_id - peer_id from firmware
1505 * @hw_peer_id - ast index for this peer
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301506 * @vdev_id - vdev ID
1507 * @peer_mac_addr - mac address of the peer
1508 * @ast_hash - ast hash value
1509 * @is_wds - flag to indicate peer map event for WDS ast entry
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301510 *
1511 * associate the peer_id that firmware provided with peer entry
1512 * and update the ast table in the host with the hw_peer_id.
1513 *
1514 * Return: none
1515 */
1516
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001517void
Akshay Kosigi8a753142019-06-27 14:17:08 +05301518dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301519 uint16_t hw_peer_id, uint8_t vdev_id,
1520 uint8_t *peer_mac_addr, uint16_t ast_hash,
1521 uint8_t is_wds)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001522{
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301523 struct dp_peer *peer = NULL;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301524 enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301525
Shivani Sonia5707a42020-01-08 16:42:08 +05301526 dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %pM, vdev_id %d",
1527 soc, peer_id, hw_peer_id,
1528 peer_mac_addr, vdev_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001529
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301530	/* For a peer map event on a WDS ast entry, get the peer from
1531	 * the obj map
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301532 */
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301533 if (is_wds) {
1534 peer = soc->peer_id_to_obj_map[peer_id];
Tallapragada Kalyan17254ed2019-06-14 18:13:51 +05301535 /*
1536 * In certain cases like Auth attack on a repeater
1537 * can result in the number of ast_entries falling
1538 * in the same hash bucket to exceed the max_skid
1539 * length supported by HW in root AP. In these cases
1540 * the FW will return the hw_peer_id (ast_index) as
1541 * 0xffff indicating HW could not add the entry in
1542 * its table. Host has to delete the entry from its
1543 * table in these cases.
1544 */
1545 if (hw_peer_id == HTT_INVALID_PEER) {
1546 DP_STATS_INC(soc, ast.map_err, 1);
1547 if (!dp_peer_ast_free_entry_by_mac(soc,
1548 peer,
1549 peer_mac_addr))
1550 return;
1551
1552 dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1553 peer, peer->peer_ids[0],
1554 peer->mac_addr.raw, peer_mac_addr, vdev_id,
1555 is_wds);
1556
1557 return;
1558 }
1559
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301560 } else {
Tallapragada Kalyan17254ed2019-06-14 18:13:51 +05301561 /*
1562 * It's the responsibility of the CP and FW to ensure
1563 * that peer is created successfully. Ideally DP should
1564	 * not hit the below condition for directly associated
1565 * peers.
1566 */
1567 if ((hw_peer_id < 0) ||
1568 (hw_peer_id >=
1569 wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
1570 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1571 "invalid hw_peer_id: %d", hw_peer_id);
1572 qdf_assert_always(0);
1573 }
1574
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301575 peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301576 hw_peer_id, vdev_id);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05301577
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301578 if (peer) {
Chaitanya Kiran Godavarthi70aeda12019-02-01 17:32:48 +05301579 if (wlan_op_mode_sta == peer->vdev->opmode &&
1580 qdf_mem_cmp(peer->mac_addr.raw,
1581 peer->vdev->mac_addr.raw,
1582 QDF_MAC_ADDR_SIZE) != 0) {
1583 dp_info("STA vdev bss_peer!!!!");
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301584 peer->bss_peer = 1;
1585 peer->vdev->vap_bss_peer = peer;
Venkata Sharath Chandra Manchalaa12702b2020-01-17 14:46:19 -08001586 qdf_mem_copy(peer->vdev->vap_bss_peer_mac_addr,
1587 peer->mac_addr.raw,
1588 QDF_MAC_ADDR_SIZE);
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301589 }
1590
Subhranil Choudhury59857162019-09-19 13:33:13 +05301591 if (peer->vdev->opmode == wlan_op_mode_sta) {
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301592 peer->vdev->bss_ast_hash = ast_hash;
Subhranil Choudhury59857162019-09-19 13:33:13 +05301593 peer->vdev->bss_ast_idx = hw_peer_id;
1594 }
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301595
1596			/* Add ast entry in case self ast entry is
1597 * deleted due to DP CP sync issue
1598 *
1599 * self_ast_entry is modified in peer create
1600 * and peer unmap path which cannot run in
1601			 * parallel with peer map, so no lock is needed before
1602			 * referring to it
1603 */
1604 if (!peer->self_ast_entry) {
Mohit Khanna02553142019-04-11 17:49:27 -07001605 dp_info("Add self ast from map %pM",
1606 peer_mac_addr);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301607 dp_peer_add_ast(soc, peer,
1608 peer_mac_addr,
1609 type, 0);
1610 }
1611
sumedh baikady68450ab2018-03-23 18:36:29 -07001612 }
Anish Nataraj0dae6762018-03-02 22:31:45 +05301613 }
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301614 dp_peer_map_ast(soc, peer, peer_mac_addr,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301615 hw_peer_id, vdev_id, ast_hash);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001616}
1617
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301618/**
1619 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1620 * @soc_handle - generic soc handle
1621 * @peer_id - peer_id from firmware
1622 * @vdev_id - vdev ID
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301623 * @mac_addr - mac address of the peer or wds entry
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301624 * @is_wds - flag to indicate peer unmap event for WDS ast entry
1625 *
1626 * Return: none
1627 */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001628void
Akshay Kosigi8a753142019-06-27 14:17:08 +05301629dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301630 uint8_t vdev_id, uint8_t *mac_addr,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301631 uint8_t is_wds)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001632{
1633 struct dp_peer *peer;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001634 uint8_t i;
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301635
1636 peer = __dp_peer_find_by_id(soc, peer_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001637
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001638 /*
1639 * Currently peer IDs are assigned for vdevs as well as peers.
1640 * If the peer ID is for a vdev, then the peer pointer stored
1641 * in peer_id_to_obj_map will be NULL.
1642 */
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301643 if (!peer) {
Mohit Khanna02553142019-04-11 17:49:27 -07001644 dp_err("Received unmap event for invalid peer_id %u", peer_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001645 return;
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301646 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001647
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301648	/* If V2 peer map messages are enabled, the AST entry has to be freed here
1649 */
Radha Krishna Simha Jiguru64b48482019-12-23 17:09:41 +05301650 if (is_wds) {
Tallapragada Kalyan17254ed2019-06-14 18:13:51 +05301651 if (!dp_peer_ast_free_entry_by_mac(soc, peer, mac_addr))
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301652 return;
Pavankumar Nandeshwar1ab908e2019-01-24 12:53:13 +05301653
Mohit Khanna02553142019-04-11 17:49:27 -07001654 dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1655 peer, peer->peer_ids[0],
1656 peer->mac_addr.raw, mac_addr, vdev_id,
1657 is_wds);
Pavankumar Nandeshwar1ab908e2019-01-24 12:53:13 +05301658
1659 return;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301660 }
1661
Mohit Khanna02553142019-04-11 17:49:27 -07001662 dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301663 soc, peer_id, peer);
1664
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001665 soc->peer_id_to_obj_map[peer_id] = NULL;
1666 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1667 if (peer->peer_ids[i] == peer_id) {
1668 peer->peer_ids[i] = HTT_INVALID_PEER;
1669 break;
1670 }
1671 }
1672
Mainak Send13ed3e2019-12-24 14:52:01 +05301673 /*
1674 * Reset ast flow mapping table
1675 */
1676 dp_peer_reset_flowq_map(peer);
1677
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05301678 if (soc->cdp_soc.ol_ops->peer_unmap_event) {
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05301679 soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
Subhranil Choudhury9bcfecf2019-02-28 13:41:45 +05301680 peer_id, vdev_id);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05301681 }
1682
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001683 /*
1684 * Remove a reference to the peer.
1685 * If there are no more references, delete the peer object.
1686 */
1687 dp_peer_unref_delete(peer);
1688}
1689
1690void
1691dp_peer_find_detach(struct dp_soc *soc)
1692{
1693 dp_peer_find_map_detach(soc);
1694 dp_peer_find_hash_detach(soc);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301695 dp_peer_ast_hash_detach(soc);
Tallapragada Kalyanc7413082019-03-07 21:22:10 +05301696 dp_peer_ast_table_detach(soc);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001697}
1698
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001699static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1700 union hal_reo_status *reo_status)
1701{
1702 struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001703
Karunakar Dasineni31b98d42018-02-27 23:05:08 -08001704 if ((reo_status->rx_queue_status.header.status !=
1705 HAL_REO_CMD_SUCCESS) &&
1706 (reo_status->rx_queue_status.header.status !=
1707 HAL_REO_CMD_DRAIN)) {
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001708 /* Should not happen normally. Just print error for now */
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301709 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1710 "%s: Rx tid HW desc update failed(%d): tid %d",
1711 __func__,
1712 reo_status->rx_queue_status.header.status,
1713 rx_tid->tid);
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001714 }
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001715}
1716
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001717/*
Leo Chang5ea93a42016-11-03 12:39:49 -07001718 * dp_find_peer_by_addr - find peer instance by mac address
1719 * @dev: physical device instance
1720 * @peer_mac_addr: peer mac address
Leo Chang5ea93a42016-11-03 12:39:49 -07001721 *
1722 * Return: peer instance pointer
1723 */
Yeshwanth Sriram Guntuka65d54772019-11-22 14:50:02 +05301724void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr)
Leo Chang5ea93a42016-11-03 12:39:49 -07001725{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001726 struct dp_pdev *pdev = (struct dp_pdev *)dev;
Leo Chang5ea93a42016-11-03 12:39:49 -07001727 struct dp_peer *peer;
1728
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05301729 peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05301730
Leo Chang5ea93a42016-11-03 12:39:49 -07001731 if (!peer)
1732 return NULL;
1733
Yeshwanth Sriram Guntuka65d54772019-11-22 14:50:02 +05301734 dp_verbose_debug("peer %pK mac: %pM", peer,
1735 peer->mac_addr.raw);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08001736
1737 /* ref_cnt is incremented inside dp_peer_find_hash_find().
1738 * Decrement it here.
1739 */
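	/* Note: after this unref the pointer returned below is no longer
	 * protected by a reference count; callers of this legacy API must
	 * themselves guarantee the peer cannot be deleted concurrently.
	 */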
Chaithanya Garrepalli7c8cf122018-09-07 19:23:52 +05301740 dp_peer_unref_delete(peer);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08001741
Leo Chang5ea93a42016-11-03 12:39:49 -07001742 return peer;
1743}
1744
Varun Reddy Yeturu8119e122019-07-27 14:14:02 -07001745static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
1746{
1747 struct ol_if_ops *ol_ops = NULL;
1748 bool is_roaming = false;
1749 uint8_t vdev_id = -1;
Vevek Venkatesanaf776982019-09-12 03:43:08 +05301750 struct cdp_soc_t *soc;
Varun Reddy Yeturu8119e122019-07-27 14:14:02 -07001751
1752 if (!peer) {
1753 dp_info("Peer is NULL. No roaming possible");
1754 return false;
1755 }
Vevek Venkatesanaf776982019-09-12 03:43:08 +05301756
1757 soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
Varun Reddy Yeturu8119e122019-07-27 14:14:02 -07001758 ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
1759
1760 if (ol_ops && ol_ops->is_roam_inprogress) {
Vevek Venkatesanaf776982019-09-12 03:43:08 +05301761 dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
Varun Reddy Yeturu8119e122019-07-27 14:14:02 -07001762 is_roaming = ol_ops->is_roam_inprogress(vdev_id);
1763 }
1764
1765 dp_info("peer: %pM, vdev_id: %d, is_roaming: %d",
1766 peer->mac_addr.raw, vdev_id, is_roaming);
1767
1768 return is_roaming;
1769}
1770
Mohit Khanna82382b32019-12-09 19:15:27 -08001771QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
Varun Reddy Yeturu8119e122019-07-27 14:14:02 -07001772 ba_window_size, uint32_t start_seq)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001773{
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001774 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1775 struct dp_soc *soc = peer->vdev->pdev->soc;
1776 struct hal_reo_cmd_params params;
1777
1778 qdf_mem_zero(&params, sizeof(params));
1779
1780 params.std.need_status = 1;
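	/* The 64-bit REO queue descriptor DMA address is split across the
	 * two 32-bit address fields of the REO command descriptor.
	 */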
1781 params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1782 params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1783 params.u.upd_queue_params.update_ba_window_size = 1;
1784 params.u.upd_queue_params.ba_window_size = ba_window_size;
1785
1786 if (start_seq < IEEE80211_SEQ_MAX) {
1787 params.u.upd_queue_params.update_ssn = 1;
1788 params.u.upd_queue_params.ssn = start_seq;
sumedh baikadyc0bd0be2019-08-08 17:52:24 -07001789 } else {
1790 dp_set_ssn_valid_flag(&params, 0);
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001791 }
Rakesh Pillaiae0f6012020-01-02 11:03:09 +05301792
1793 if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1794 dp_rx_tid_update_cb, rx_tid)) {
1795 dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
1796 DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
1797 }
Sumedh Baikady1c61e062018-02-12 22:25:47 -08001798
1799 rx_tid->ba_win_size = ba_window_size;
Gyanranjan Hazarika7f9c0502018-07-25 23:26:16 -07001800
Varun Reddy Yeturu8119e122019-07-27 14:14:02 -07001801 if (dp_get_peer_vdev_roaming_in_progress(peer))
1802 return QDF_STATUS_E_PERM;
1803
1804 if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup)
Rakesh Pillai9498cd72019-04-05 18:43:47 +05301805 soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301806 soc->ctrl_psoc, peer->vdev->pdev->pdev_id,
Rakesh Pillai9498cd72019-04-05 18:43:47 +05301807 peer->vdev->vdev_id, peer->mac_addr.raw,
1808 rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
Sravan Kumar Kairam4f6b8f52019-03-18 14:53:06 +05301809
Varun Reddy Yeturu8119e122019-07-27 14:14:02 -07001810 return QDF_STATUS_SUCCESS;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001811}
1812
1813/*
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001814 * dp_reo_desc_free() - Callback to free reo descriptor memory after
1815 * HW cache flush
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001816 *
1817 * @soc: DP SOC handle
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001818 * @cb_ctxt: Callback context
1819 * @reo_status: REO command status
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001820 */
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001821static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1822 union hal_reo_status *reo_status)
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001823{
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001824 struct reo_desc_list_node *freedesc =
1825 (struct reo_desc_list_node *)cb_ctxt;
1826 struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001827
Karunakar Dasineni31b98d42018-02-27 23:05:08 -08001828 if ((reo_status->fl_cache_status.header.status !=
1829 HAL_REO_CMD_SUCCESS) &&
1830 (reo_status->fl_cache_status.header.status !=
1831 HAL_REO_CMD_DRAIN)) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301832 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1833 "%s: Rx tid HW desc flush failed(%d): tid %d",
1834 __func__,
1835 reo_status->rx_queue_status.header.status,
1836 freedesc->rx_tid.tid);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001837 }
chenguo8df4d462018-12-19 16:33:14 +08001838 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1839 "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1840 (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001841 qdf_mem_unmap_nbytes_single(soc->osdev,
1842 rx_tid->hw_qdesc_paddr,
1843 QDF_DMA_BIDIRECTIONAL,
1844 rx_tid->hw_qdesc_alloc_size);
1845 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1846 qdf_mem_free(freedesc);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001847}
1848
Nandha Kishore Easwaranb7c18842019-12-24 10:36:37 +05301849#if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001850/* Hawkeye emulation requires bus address to be >= 0x50000000 */
1851static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1852{
1853 if (dma_addr < 0x50000000)
1854 return QDF_STATUS_E_FAILURE;
1855 else
1856 return QDF_STATUS_SUCCESS;
1857}
1858#else
1859static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1860{
1861 return QDF_STATUS_SUCCESS;
1862}
1863#endif
1864
1865
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001866/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001867 * dp_rx_tid_setup_wifi3() – Setup receive TID state
1868 * @peer: Datapath peer handle
1869 * @tid: TID
1870 * @ba_window_size: BlockAck window size
1871 * @start_seq: Starting sequence number
1872 *
Varun Reddy Yeturu8119e122019-07-27 14:14:02 -07001873 * Return: QDF_STATUS code
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001874 */
Varun Reddy Yeturu8119e122019-07-27 14:14:02 -07001875QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
1876 uint32_t ba_window_size, uint32_t start_seq)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001877{
1878 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1879 struct dp_vdev *vdev = peer->vdev;
1880 struct dp_soc *soc = vdev->pdev->soc;
1881 uint32_t hw_qdesc_size;
1882 uint32_t hw_qdesc_align;
1883 int hal_pn_type;
1884 void *hw_qdesc_vaddr;
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001885 uint32_t alloc_tries = 0;
Varun Reddy Yeturu8119e122019-07-27 14:14:02 -07001886 QDF_STATUS err = QDF_STATUS_SUCCESS;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001887
Tallapragada Kalyan8c93d5d2018-05-28 05:02:53 +05301888 if (peer->delete_in_progress ||
1889 !qdf_atomic_read(&peer->is_default_route_set))
Karunakar Dasineni372647d2018-01-15 22:27:39 -08001890 return QDF_STATUS_E_FAILURE;
1891
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001892 rx_tid->ba_win_size = ba_window_size;
Jeff Johnsona8edf332019-03-18 09:51:52 -07001893 if (rx_tid->hw_qdesc_vaddr_unaligned)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001894 return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1895 start_seq);
sumedh baikadydf4a57c2018-04-08 22:19:22 -07001896 rx_tid->delba_tx_status = 0;
1897 rx_tid->ppdu_id_2k = 0;
sumedh baikadye3947bd2017-11-29 19:19:25 -08001898 rx_tid->num_of_addba_req = 0;
1899 rx_tid->num_of_delba_req = 0;
1900 rx_tid->num_of_addba_resp = 0;
Sumedh Baikady1c61e062018-02-12 22:25:47 -08001901 rx_tid->num_addba_rsp_failed = 0;
1902 rx_tid->num_addba_rsp_success = 0;
sumedh baikadydf4a57c2018-04-08 22:19:22 -07001903 rx_tid->delba_tx_success_cnt = 0;
1904 rx_tid->delba_tx_fail_cnt = 0;
1905 rx_tid->statuscode = 0;
Karunakar Dasineni26ebbe42018-05-31 07:59:10 -07001906
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001907 /* TODO: Allocating HW queue descriptors based on max BA window size
1908 * for all QOS TIDs so that same descriptor can be used later when
1909	 * ADDBA request is received. This should be changed to allocate HW
1910 * queue descriptors based on BA window size being negotiated (0 for
1911 * non BA cases), and reallocate when BA window size changes and also
1912 * send WMI message to FW to change the REO queue descriptor in Rx
1913 * peer entry as part of dp_rx_tid_update.
1914 */
1915 if (tid != DP_NON_QOS_TID)
1916 hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
Karunakar Dasineni26ebbe42018-05-31 07:59:10 -07001917 HAL_RX_MAX_BA_WINDOW, tid);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001918 else
1919 hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
Karunakar Dasineni26ebbe42018-05-31 07:59:10 -07001920 ba_window_size, tid);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001921
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001922 hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1923 /* To avoid unnecessary extra allocation for alignment, try allocating
1924	 * the exact size and see if we already have an aligned address.
1925 */
1926 rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001927
1928try_desc_alloc:
1929 rx_tid->hw_qdesc_vaddr_unaligned =
1930 qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001931
1932 if (!rx_tid->hw_qdesc_vaddr_unaligned) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301933 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1934 "%s: Rx tid HW desc alloc failed: tid %d",
1935 __func__, tid);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001936 return QDF_STATUS_E_NOMEM;
1937 }
1938
1939 if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1940 hw_qdesc_align) {
1941		/* Address allocated above is not aligned. Allocate extra
1942 * memory for alignment
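		 * (over-allocating hw_qdesc_align - 1 extra bytes guarantees
		 * that an aligned start address exists somewhere inside the
		 * buffer; e.g. a hypothetical 128-byte alignment needs at
		 * most 127 extra bytes for qdf_align() below to succeed)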
1943 */
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001944 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001945 rx_tid->hw_qdesc_vaddr_unaligned =
Pramod Simha6b23f752017-03-30 11:54:18 -07001946 qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1947 hw_qdesc_align - 1);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001948
1949 if (!rx_tid->hw_qdesc_vaddr_unaligned) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301950 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1951 "%s: Rx tid HW desc alloc failed: tid %d",
1952 __func__, tid);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001953 return QDF_STATUS_E_NOMEM;
1954 }
1955
Pramod Simha6b23f752017-03-30 11:54:18 -07001956 hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1957 rx_tid->hw_qdesc_vaddr_unaligned,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001958 hw_qdesc_align);
Pramod Simha6b23f752017-03-30 11:54:18 -07001959
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301960 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1961 "%s: Total Size %d Aligned Addr %pK",
1962 __func__, rx_tid->hw_qdesc_alloc_size,
1963 hw_qdesc_vaddr);
Pramod Simha6b23f752017-03-30 11:54:18 -07001964
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001965 } else {
1966 hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001967 }
1968
1969 /* TODO: Ensure that sec_type is set before ADDBA is received.
1970 * Currently this is set based on htt indication
1971 * HTT_T2H_MSG_TYPE_SEC_IND from target
1972 */
1973 switch (peer->security[dp_sec_ucast].sec_type) {
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05301974 case cdp_sec_type_tkip_nomic:
1975 case cdp_sec_type_aes_ccmp:
1976 case cdp_sec_type_aes_ccmp_256:
1977 case cdp_sec_type_aes_gcmp:
1978 case cdp_sec_type_aes_gcmp_256:
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001979 hal_pn_type = HAL_PN_WPA;
1980 break;
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05301981 case cdp_sec_type_wapi:
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001982 if (vdev->opmode == wlan_op_mode_ap)
1983 hal_pn_type = HAL_PN_WAPI_EVEN;
1984 else
1985 hal_pn_type = HAL_PN_WAPI_UNEVEN;
1986 break;
1987 default:
1988 hal_pn_type = HAL_PN_NONE;
1989 break;
1990 }
1991
1992 hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1993 hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1994
Pramod Simha6b23f752017-03-30 11:54:18 -07001995 qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001996 QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
Pramod Simha6b23f752017-03-30 11:54:18 -07001997 &(rx_tid->hw_qdesc_paddr));
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001998
Pramod Simha6b23f752017-03-30 11:54:18 -07001999 if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08002000 QDF_STATUS_SUCCESS) {
nobeljfdfe7ea2018-06-19 18:08:25 -07002001 if (alloc_tries++ < 10) {
2002 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2003 rx_tid->hw_qdesc_vaddr_unaligned = NULL;
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08002004 goto try_desc_alloc;
nobeljfdfe7ea2018-06-19 18:08:25 -07002005 } else {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05302006 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2007 "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
2008 __func__, tid);
nobeljfdfe7ea2018-06-19 18:08:25 -07002009 err = QDF_STATUS_E_NOMEM;
2010 goto error;
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08002011 }
2012 }
2013
Varun Reddy Yeturu8119e122019-07-27 14:14:02 -07002014 if (dp_get_peer_vdev_roaming_in_progress(peer)) {
2015 err = QDF_STATUS_E_PERM;
2016 goto error;
2017 }
2018
Leo Chang5ea93a42016-11-03 12:39:49 -07002019 if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
nobeljfdfe7ea2018-06-19 18:08:25 -07002020 if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05302021 soc->ctrl_psoc,
2022 peer->vdev->pdev->pdev_id,
2023 peer->vdev->vdev_id,
nobeljfdfe7ea2018-06-19 18:08:25 -07002024 peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
2025 1, ba_window_size)) {
2026 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2027 "%s: Failed to send reo queue setup to FW - tid %d\n",
2028 __func__, tid);
2029 err = QDF_STATUS_E_FAILURE;
2030 goto error;
2031 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002032 }
2033	return QDF_STATUS_SUCCESS;
nobeljfdfe7ea2018-06-19 18:08:25 -07002034error:
Jeff Johnsona8edf332019-03-18 09:51:52 -07002035 if (rx_tid->hw_qdesc_vaddr_unaligned) {
nobeljfdfe7ea2018-06-19 18:08:25 -07002036 if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
2037 QDF_STATUS_SUCCESS)
2038 qdf_mem_unmap_nbytes_single(
2039 soc->osdev,
2040 rx_tid->hw_qdesc_paddr,
2041 QDF_DMA_BIDIRECTIONAL,
2042 rx_tid->hw_qdesc_alloc_size);
2043 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
2044 rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2045 }
2046 return err;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002047}
2048
Nisha Menon98c4dd42019-11-01 18:54:19 -07002049#ifdef REO_DESC_DEFER_FREE
2050/*
2051 * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails, add
2052 * the desc back to the freelist and defer the deletion
2053 *
2054 * @soc: DP SOC handle
2055 * @desc: Base descriptor to be freed
2056 * @reo_status: REO command status
2057 */
2058static void dp_reo_desc_clean_up(struct dp_soc *soc,
2059 struct reo_desc_list_node *desc,
2060 union hal_reo_status *reo_status)
2061{
2062 desc->free_ts = qdf_get_system_timestamp();
2063 DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2064 qdf_list_insert_back(&soc->reo_desc_freelist,
2065 (qdf_list_node_t *)desc);
2066}
2067
2068#else
2069/*
2070 * dp_reo_desc_clean_up() - If sending the REO cmd to flush the
2071 * cache fails, free the base REO desc anyway
2072 *
2073 * @soc: DP SOC handle
2074 * @desc: Base descriptor to be freed
2075 * @reo_status: REO command status
2076 */
2077static void dp_reo_desc_clean_up(struct dp_soc *soc,
2078 struct reo_desc_list_node *desc,
2079 union hal_reo_status *reo_status)
2080{
2081 if (reo_status) {
2082 qdf_mem_zero(reo_status, sizeof(*reo_status));
2083 reo_status->fl_cache_status.header.status = 0;
2084 dp_reo_desc_free(soc, (void *)desc, reo_status);
2085 }
2086}
2087#endif
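/*
 * Summary of the two variants above: with REO_DESC_DEFER_FREE the
 * descriptor is parked back on reo_desc_freelist and reclaimed later by
 * the aging logic in dp_rx_tid_delete_cb(), while without it the
 * descriptor (and the TID queue memory it owns) is reclaimed immediately
 * through dp_reo_desc_free() with a synthesized success status.
 */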
2088
2089/*
2090 * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
2091 * cmd and re-insert desc into free list if send fails.
2092 *
2093 * @soc: DP SOC handle
2094 * @desc: desc with resend update cmd flag set
2095 * @rx_tid: Desc RX tid associated with update cmd for resetting
2096 * valid field to 0 in h/w
2097 */
2098static void dp_resend_update_reo_cmd(struct dp_soc *soc,
2099 struct reo_desc_list_node *desc,
2100 struct dp_rx_tid *rx_tid)
2101{
2102 struct hal_reo_cmd_params params;
2103
2104 qdf_mem_zero(&params, sizeof(params));
2105 params.std.need_status = 1;
2106 params.std.addr_lo =
2107 rx_tid->hw_qdesc_paddr & 0xffffffff;
2108 params.std.addr_hi =
2109 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2110 params.u.upd_queue_params.update_vld = 1;
2111 params.u.upd_queue_params.vld = 0;
2112 desc->resend_update_reo_cmd = false;
2113 /*
2114 * If the cmd send fails then set resend_update_reo_cmd flag
2115 * and insert the desc at the end of the free list to retry.
2116 */
2117 if (dp_reo_send_cmd(soc,
2118 CMD_UPDATE_RX_REO_QUEUE,
2119 &params,
2120 dp_rx_tid_delete_cb,
2121 (void *)desc)
2122 != QDF_STATUS_SUCCESS) {
2123 desc->resend_update_reo_cmd = true;
2124 desc->free_ts = qdf_get_system_timestamp();
2125 qdf_list_insert_back(&soc->reo_desc_freelist,
2126 (qdf_list_node_t *)desc);
Rakesh Pillaiae0f6012020-01-02 11:03:09 +05302127 dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
Nisha Menon98c4dd42019-11-01 18:54:19 -07002128 DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2129 }
2130}
2131
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002132/*
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08002133 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
2134 * after deleting the entries (i.e., setting valid=0)
2135 *
2136 * @soc: DP SOC handle
2137 * @cb_ctxt: Callback context
2138 * @reo_status: REO command status
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002139 */
Nisha Menon98c4dd42019-11-01 18:54:19 -07002140void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
2141 union hal_reo_status *reo_status)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002142{
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08002143 struct reo_desc_list_node *freedesc =
2144 (struct reo_desc_list_node *)cb_ctxt;
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08002145 uint32_t list_size;
2146 struct reo_desc_list_node *desc;
2147 unsigned long curr_ts = qdf_get_system_timestamp();
2148 uint32_t desc_size, tot_desc_size;
2149 struct hal_reo_cmd_params params;
2150
Karunakar Dasineni31b98d42018-02-27 23:05:08 -08002151 if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
2152 qdf_mem_zero(reo_status, sizeof(*reo_status));
2153 reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
2154 dp_reo_desc_free(soc, (void *)freedesc, reo_status);
2155 return;
2156 } else if (reo_status->rx_queue_status.header.status !=
2157 HAL_REO_CMD_SUCCESS) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002158 /* Should not happen normally. Just print error for now */
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05302159 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2160 "%s: Rx tid HW desc deletion failed(%d): tid %d",
2161 __func__,
2162 reo_status->rx_queue_status.header.status,
2163 freedesc->rx_tid.tid);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002164 }
2165
Houston Hoffman41b912c2017-08-30 14:27:51 -07002166 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
Aditya Sathishded018e2018-07-02 16:25:21 +05302167 "%s: rx_tid: %d status: %d", __func__,
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08002168 freedesc->rx_tid.tid,
2169 reo_status->rx_queue_status.header.status);
Krishna Kumaar Natarajan1741dc42017-01-26 19:24:48 -08002170
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08002171 qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2172 freedesc->free_ts = curr_ts;
2173 qdf_list_insert_back_size(&soc->reo_desc_freelist,
2174 (qdf_list_node_t *)freedesc, &list_size);
2175
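	/* Drain the freelist when it has grown past REO_DESC_FREELIST_SIZE,
	 * when the head entry has aged past REO_DESC_FREE_DEFER_MS, or when
	 * the head entry carries a pending UPDATE_RX_REO_QUEUE resend.
	 */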
2176 while ((qdf_list_peek_front(&soc->reo_desc_freelist,
2177 (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
2178 ((list_size >= REO_DESC_FREELIST_SIZE) ||
Nisha Menon98c4dd42019-11-01 18:54:19 -07002179 (curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
2180 (desc->resend_update_reo_cmd && list_size))) {
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08002181 struct dp_rx_tid *rx_tid;
2182
2183 qdf_list_remove_front(&soc->reo_desc_freelist,
2184 (qdf_list_node_t **)&desc);
2185 list_size--;
2186 rx_tid = &desc->rx_tid;
2187
Nisha Menon98c4dd42019-11-01 18:54:19 -07002188 /* First process descs with resend_update_reo_cmd set */
2189 if (desc->resend_update_reo_cmd) {
2190 dp_resend_update_reo_cmd(soc, desc, rx_tid);
2191 continue;
2192 }
2193
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08002194 /* Flush and invalidate REO descriptor from HW cache: Base and
2195 * extension descriptors should be flushed separately */
Karunakar Dasineni26ebbe42018-05-31 07:59:10 -07002196 tot_desc_size = rx_tid->hw_qdesc_alloc_size;
2197 /* Get base descriptor size by passing non-qos TID */
2198 desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
2199 DP_NON_QOS_TID);
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08002200
2201 /* Flush reo extension descriptors */
2202 while ((tot_desc_size -= desc_size) > 0) {
2203 qdf_mem_zero(&params, sizeof(params));
2204 params.std.addr_lo =
2205 ((uint64_t)(rx_tid->hw_qdesc_paddr) +
2206 tot_desc_size) & 0xffffffff;
2207 params.std.addr_hi =
2208 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2209
2210 if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2211 CMD_FLUSH_CACHE,
2212 &params,
2213 NULL,
2214 NULL)) {
Rakesh Pillaiae0f6012020-01-02 11:03:09 +05302215 dp_err_rl("fail to send CMD_CACHE_FLUSH:"
2216 "tid %d desc %pK", rx_tid->tid,
2217 (void *)(rx_tid->hw_qdesc_paddr));
2218 DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08002219 }
2220 }
2221
2222 /* Flush base descriptor */
2223 qdf_mem_zero(&params, sizeof(params));
2224 params.std.need_status = 1;
2225 params.std.addr_lo =
2226 (uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
2227 params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2228
2229 if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
2230 CMD_FLUSH_CACHE,
2231 &params,
2232 dp_reo_desc_free,
2233 (void *)desc)) {
2234 union hal_reo_status reo_status;
2235 /*
2236			 * If dp_reo_send_cmd returns failure, the related TID queue desc
2237			 * should be unmapped. The local reo_desc, together with the
2238			 * TID queue desc, also needs to be freed accordingly.
2239 *
2240 * Here invoke desc_free function directly to do clean up.
Nisha Menon98c4dd42019-11-01 18:54:19 -07002241 *
2242 * In case of MCL path add the desc back to the free
2243 * desc list and defer deletion.
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08002244 */
Venkata Sharath Chandra Manchalaea6518b2019-10-25 18:03:25 -07002245 dp_err_log("%s: fail to send REO cmd to flush cache: tid %d",
2246 __func__, rx_tid->tid);
Nisha Menon98c4dd42019-11-01 18:54:19 -07002247 dp_reo_desc_clean_up(soc, desc, &reo_status);
Rakesh Pillaiae0f6012020-01-02 11:03:09 +05302248 DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08002249 }
2250 }
2251 qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002252}
2253
2254/*
2255 * dp_rx_tid_delete_wifi3() – Delete receive TID queue
2256 * @peer: Datapath peer handle
2257 * @tid: TID
2258 *
2259 * Return: 0 on success, error code on failure
2260 */
Jeff Johnson416168b2017-01-06 09:42:43 -08002261static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002262{
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08002263 struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
2264 struct dp_soc *soc = peer->vdev->pdev->soc;
2265 struct hal_reo_cmd_params params;
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08002266 struct reo_desc_list_node *freedesc =
2267 qdf_mem_malloc(sizeof(*freedesc));
Lin Baifca76402017-12-11 15:03:49 +08002268
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08002269 if (!freedesc) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05302270 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2271 "%s: malloc failed for freedesc: tid %d",
2272 __func__, tid);
Karunakar Dasinenia8c779b2017-01-11 13:57:55 -08002273 return -ENOMEM;
2274 }
2275
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08002276 freedesc->rx_tid = *rx_tid;
Nisha Menon98c4dd42019-11-01 18:54:19 -07002277 freedesc->resend_update_reo_cmd = false;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08002278
2279 qdf_mem_zero(&params, sizeof(params));
2280
Karunakar Dasineni6a526752018-08-02 08:56:19 -07002281 params.std.need_status = 1;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08002282 params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
2283 params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2284 params.u.upd_queue_params.update_vld = 1;
2285 params.u.upd_queue_params.vld = 0;
2286
Nisha Menon98c4dd42019-11-01 18:54:19 -07002287 if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
2288 dp_rx_tid_delete_cb, (void *)freedesc)
2289 != QDF_STATUS_SUCCESS) {
2290 /* Defer the clean up to the call back context */
2291 qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2292 freedesc->free_ts = qdf_get_system_timestamp();
2293 freedesc->resend_update_reo_cmd = true;
2294 qdf_list_insert_front(&soc->reo_desc_freelist,
2295 (qdf_list_node_t *)freedesc);
2296 DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
2297 qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2298 dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
2299 }
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08002300
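	/* Ownership of the HW queue descriptor memory has been transferred
	 * to freedesc (which holds a copy of *rx_tid), so clear the live
	 * rx_tid fields to prevent a double free.
	 */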
Karunakar Dasinenia8c779b2017-01-11 13:57:55 -08002301 rx_tid->hw_qdesc_vaddr_unaligned = NULL;
2302 rx_tid->hw_qdesc_alloc_size = 0;
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08002303 rx_tid->hw_qdesc_paddr = 0;
Karunakar Dasinenia8c779b2017-01-11 13:57:55 -08002304
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08002305 return 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002306}
2307
Pramod Simhab17d0672017-03-06 17:20:13 -08002308#ifdef DP_LFR
2309static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
2310{
2311 int tid;
2312
2313 for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
2314 dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05302315 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2316 "Setting up TID %d for peer %pK peer->local_id %d",
2317 tid, peer, peer->local_id);
Pramod Simhab17d0672017-03-06 17:20:13 -08002318 }
2319}
2320#else
2321static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
2322#endif
nobeljdebe2b32019-04-23 11:18:47 -07002323
2324#ifndef WLAN_TX_PKT_CAPTURE_ENH
2325/*
2326 * dp_peer_tid_queue_init() – Initialize ppdu stats queue per TID
2327 * @peer: Datapath peer
2328 *
2329 */
2330static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
2331{
2332}
2333
2334/*
2335 * dp_peer_tid_queue_cleanup() – remove ppdu stats queue per TID
2336 * @peer: Datapath peer
2337 *
2338 */
2339static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
2340{
2341}
2342
2343/*
2344 * dp_peer_update_80211_hdr() – dp peer update 80211 hdr
2345 * @vdev: Datapath vdev
2346 * @peer: Datapath peer
2347 *
2348 */
2349static inline void
2350dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
2351{
2352}
2353#endif
2354
2355/*
2356 * dp_peer_tx_init() – Initialize transmit-side peer state
2357 * @pdev: Datapath pdev
2358 * @peer: Datapath peer
2359 *
2360 */
2361void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2362{
2363 dp_peer_tid_queue_init(peer);
2364 dp_peer_update_80211_hdr(peer->vdev, peer);
2365}
2366
2367/*
2368 * dp_peer_tx_cleanup() – Deinitialize transmit-side peer state
2369 * @vdev: Datapath vdev
2370 * @peer: Datapath peer
2371 *
2372 */
2373static inline void
2374dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2375{
2376 dp_peer_tid_queue_cleanup(peer);
2377}
2378
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002379/*
2380 * dp_peer_rx_init() – Initialize receive TID state
2381 * @pdev: Datapath pdev
2382 * @peer: Datapath peer
2383 *
2384 */
2385void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2386{
2387 int tid;
2388 struct dp_rx_tid *rx_tid;
2389 for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2390 rx_tid = &peer->rx_tid[tid];
2391 rx_tid->array = &rx_tid->base;
2392 rx_tid->base.head = rx_tid->base.tail = NULL;
2393 rx_tid->tid = tid;
2394 rx_tid->defrag_timeout_ms = 0;
2395 rx_tid->ba_win_size = 0;
2396 rx_tid->ba_status = DP_RX_BA_INACTIVE;
2397
2398 rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2399 rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002400 }
2401
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002402 peer->active_ba_session_cnt = 0;
2403 peer->hw_buffer_size = 0;
2404 peer->kill_256_sessions = 0;
2405
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002406 /* Setup default (non-qos) rx tid queue */
2407 dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
Karunakar Dasinenied1de122016-08-02 11:57:59 -07002408
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08002409 /* Setup rx tid queue for TID 0.
2410	 * Other queues will be set up on receiving the first packet, which
2411	 * would otherwise cause a NULL REO queue error
2412 */
2413 dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2414
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002415 /*
Pramod Simhab17d0672017-03-06 17:20:13 -08002416	 * Set up the remaining TIDs to handle LFR
2417 */
2418 dp_peer_setup_remaining_tids(peer);
2419
2420 /*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002421 * Set security defaults: no PN check, no security. The target may
2422 * send a HTT SEC_IND message to overwrite these defaults.
2423 */
2424 peer->security[dp_sec_ucast].sec_type =
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05302425 peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002426}
2427
2428/*
2429 * dp_peer_rx_cleanup() – Cleanup receive TID state
2430 * @vdev: Datapath vdev
2431 * @peer: Datapath peer
Sravan Kumar Kairam1e8591a2019-08-07 20:06:52 +05302432 * @reuse: Peer reference reuse
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002433 *
2434 */
Sravan Kumar Kairam1e8591a2019-08-07 20:06:52 +05302435void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002436{
2437 int tid;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002438 uint32_t tid_delete_mask = 0;
Venkata Sharath Chandra Manchala7f30b272018-08-22 16:04:19 -07002439
Saket Jhace8c3102019-10-10 19:48:09 -07002440 dp_info("Remove tids for peer: %pK", peer);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002441 for (tid = 0; tid < DP_MAX_TIDS; tid++) {
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002442 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2443
2444 qdf_spin_lock_bh(&rx_tid->tid_lock);
Saket Jhace8c3102019-10-10 19:48:09 -07002445 if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
Lin Baif1c577e2018-05-22 20:45:42 +08002446 /* Cleanup defrag related resource */
2447 dp_rx_defrag_waitlist_remove(peer, tid);
2448 dp_rx_reorder_flush_frag(peer, tid);
Karunakar Dasinenif8ec0cb2019-01-29 13:07:05 -08002449 }
2450
2451 if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
2452 dp_rx_tid_delete_wifi3(peer, tid);
Lin Baif1c577e2018-05-22 20:45:42 +08002453
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002454 tid_delete_mask |= (1 << tid);
2455 }
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002456 qdf_spin_unlock_bh(&rx_tid->tid_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002457 }
2458#ifdef notyet /* See if FW can remove queues as part of peer cleanup */
2459 if (soc->ol_ops->peer_rx_reorder_queue_remove) {
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05302460 soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
2461 peer->vdev->pdev->pdev_id,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002462 peer->vdev->vdev_id, peer->mac_addr.raw,
2463 tid_delete_mask);
2464 }
2465#endif
Sravan Kumar Kairam1e8591a2019-08-07 20:06:52 +05302466 if (!reuse)
2467 for (tid = 0; tid < DP_MAX_TIDS; tid++)
2468 qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002469}
2470
nobelj7b0e2732019-05-31 00:19:07 -07002471#ifdef FEATURE_PERPKT_INFO
2472/*
2473 * dp_peer_ppdu_delayed_ba_init() – Initialize delayed BA ppdu stats in peer
2474 * @peer: Datapath peer
2475 *
2476 * return: void
2477 */
2478void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2479{
2480 qdf_mem_zero(&peer->delayed_ba_ppdu_stats,
2481 sizeof(struct cdp_delayed_tx_completion_ppdu_user));
2482 peer->last_delayed_ba = false;
2483 peer->last_delayed_ba_ppduid = 0;
2484}
2485#else
2486/*
2487 * dp_peer_ppdu_delayed_ba_init() – Initialize delayed BA ppdu stats in peer
2488 * @peer: Datapath peer
2489 *
2490 * return: void
2491 */
2492void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
2493{
2494}
2495#endif
2496
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002497/*
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08002498 * dp_peer_cleanup() – Cleanup peer information
2499 * @vdev: Datapath vdev
2500 * @peer: Datapath peer
Sravan Kumar Kairam1e8591a2019-08-07 20:06:52 +05302501 * @reuse: Peer reference reuse
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08002502 *
2503 */
Sravan Kumar Kairam1e8591a2019-08-07 20:06:52 +05302504void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08002505{
nobeljdebe2b32019-04-23 11:18:47 -07002506 dp_peer_tx_cleanup(vdev, peer);
2507
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08002508 /* cleanup the Rx reorder queues for this peer */
Sravan Kumar Kairam1e8591a2019-08-07 20:06:52 +05302509 dp_peer_rx_cleanup(vdev, peer, reuse);
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08002510}
2511
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002512/* dp_teardown_256_ba_sessions() - Teardown sessions using 256
2513 * window size when a request with
2514 * 64 window size is received.
2515 * This is done as a WAR since HW can
2516 * have only one setting per peer (64 or 256).
sumedh baikady61cbe852018-10-09 11:04:34 -07002517 * For HKv2, we use the per-tid buffer size setting
2518 * for tids 0 to per_tid_basize_max_tid. For tids
2519 * greater than per_tid_basize_max_tid we use the
2520 * HKv1 method.
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002521 * @peer: Datapath peer
2522 *
2523 * Return: void
2524 */
2525static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
2526{
2527 uint8_t delba_rcode = 0;
2528 int tid;
2529 struct dp_rx_tid *rx_tid = NULL;
2530
sumedh baikady61cbe852018-10-09 11:04:34 -07002531 tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
2532 for (; tid < DP_MAX_TIDS; tid++) {
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002533 rx_tid = &peer->rx_tid[tid];
2534 qdf_spin_lock_bh(&rx_tid->tid_lock);
2535
2536 if (rx_tid->ba_win_size <= 64) {
2537 qdf_spin_unlock_bh(&rx_tid->tid_lock);
2538 continue;
2539 } else {
2540 if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
2541 rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2542 /* send delba */
2543 if (!rx_tid->delba_tx_status) {
2544 rx_tid->delba_tx_retry++;
2545 rx_tid->delba_tx_status = 1;
2546 rx_tid->delba_rcode =
2547 IEEE80211_REASON_QOS_SETUP_REQUIRED;
2548 delba_rcode = rx_tid->delba_rcode;
2549
2550 qdf_spin_unlock_bh(&rx_tid->tid_lock);
Venkata Sharath Chandra Manchalaa6c04702019-06-20 15:27:58 -07002551 if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
2552 peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
Pavankumar Nandeshwar715fdc32019-10-03 20:51:01 +05302553 peer->vdev->pdev->soc->ctrl_psoc,
2554 peer->vdev->vdev_id,
2555 peer->mac_addr.raw,
2556 tid, delba_rcode);
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002557 } else {
2558 qdf_spin_unlock_bh(&rx_tid->tid_lock);
2559 }
2560 } else {
2561 qdf_spin_unlock_bh(&rx_tid->tid_lock);
2562 }
2563 }
2564 }
2565}
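/*
 * Sketch of the WAR flow above (hypothetical TID, not a trace from this
 * driver): a peer holding an active 256-window session on TID 6 later
 * negotiates a 64-entry window; once peer->kill_256_sessions is set,
 * dp_teardown_256_ba_sessions() sends a DELBA with reason
 * IEEE80211_REASON_QOS_SETUP_REQUIRED on every TID at or above
 * per_tid_basize_max_tid still using a window larger than 64, so those
 * sessions are renegotiated at the new size.
 */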
2566
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08002567/*
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002568* dp_addba_resp_tx_completion_wifi3() – Update Rx Tid State
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002569*
Pavankumar Nandeshwara2347162019-12-18 23:20:31 +05302570* @soc: Datapath soc handle
2571* @peer_mac: Datapath peer mac address
2572* @vdev_id: id of datapath vdev
Karunakar Dasinenied1de122016-08-02 11:57:59 -07002573* @tid: TID number
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002574* @status: tx completion status
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002575* Return: 0 on success, error code on failure
2576*/
int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
				      uint8_t *peer_mac,
				      uint16_t vdev_id,
				      uint8_t tid, int status)
{
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
						      peer_mac, 0, vdev_id);
	struct dp_rx_tid *rx_tid = NULL;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		goto fail;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		rx_tid->num_addba_rsp_failed++;
		dp_rx_tid_update_wifi3(peer, tid, 1,
				       IEEE80211_SEQ_MAX);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		dp_err("RxTid- %d addba rsp tx completion failed", tid);

		goto success;
	}

	rx_tid->num_addba_rsp_success++;
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
			  __func__, tid);
		goto fail;
	}

	if (!qdf_atomic_read(&peer->is_default_route_set)) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: default route is not set for peer: %pM",
			  __func__, peer->mac_addr.raw);
		goto fail;
	}

	/* First Session */
	if (peer->active_ba_session_cnt == 0) {
		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
			peer->hw_buffer_size = 256;
		else
			peer->hw_buffer_size = 64;
	}

	rx_tid->ba_status = DP_RX_BA_ACTIVE;

	peer->active_ba_session_cnt++;

	qdf_spin_unlock_bh(&rx_tid->tid_lock);

	/* Kill any session having 256 buffer size
	 * when 64 buffer size request is received.
	 * Also, latch on to 64 as new buffer size.
	 */
	if (peer->kill_256_sessions) {
		dp_teardown_256_ba_sessions(peer);
		peer->kill_256_sessions = 0;
	}

success:
	dp_peer_unref_delete(peer);
	return QDF_STATUS_SUCCESS;

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return QDF_STATUS_E_FAILURE;
}

/*
 * dp_addba_responsesetup_wifi3() - Return ADDBA response parameters for a TID
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @dialogtoken: output dialogtoken
 * @statuscode: output status code
 * @buffersize: Output BA window size
 * @batimeout: Output BA timeout
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
			     uint16_t vdev_id, uint8_t tid,
			     uint8_t *dialogtoken, uint16_t *statuscode,
			     uint16_t *buffersize, uint16_t *batimeout)
{
	struct dp_rx_tid *rx_tid = NULL;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
						      peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_resp++;
	/* setup ADDBA response parameters */
	*dialogtoken = rx_tid->dialogtoken;
	*statuscode = rx_tid->statuscode;
	*buffersize = rx_tid->ba_win_size;
	*batimeout = 0;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}

/* dp_check_ba_buffersize() - Check buffer size in request
 *			      and latch onto this size based on
 *			      size used in first active session.
 * @peer: Datapath peer
 * @tid: Tid
 * @buffersize: Block ack window size
 *
 * Return: void
 */
static void dp_check_ba_buffersize(struct dp_peer *peer,
				   uint16_t tid,
				   uint16_t buffersize)
{
	struct dp_rx_tid *rx_tid = NULL;

	rx_tid = &peer->rx_tid[tid];
	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
		rx_tid->ba_win_size = buffersize;
		return;
	} else {
		if (peer->active_ba_session_cnt == 0) {
			rx_tid->ba_win_size = buffersize;
		} else {
			if (peer->hw_buffer_size == 64) {
				if (buffersize <= 64)
					rx_tid->ba_win_size = buffersize;
				else
					rx_tid->ba_win_size = peer->hw_buffer_size;
			} else if (peer->hw_buffer_size == 256) {
				if (buffersize > 64) {
					rx_tid->ba_win_size = buffersize;
				} else {
					rx_tid->ba_win_size = buffersize;
					peer->hw_buffer_size = 64;
					peer->kill_256_sessions = 1;
				}
			}
		}
	}
}
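
/*
 * Illustrative sketch (not part of the driver): how the latching in
 * dp_check_ba_buffersize() plays out across two ADDBA requests. The
 * helper name dp_example_ba_latch() and the literal window sizes are
 * hypothetical; only the call to dp_check_ba_buffersize() and the
 * fields it touches are real.
 */
static void dp_example_ba_latch(struct dp_peer *peer)
{
	/* No active session yet: TID 0 simply takes the requested size */
	dp_check_ba_buffersize(peer, 0, 256);

	/* Once the first session completes, peer->hw_buffer_size latches
	 * to 256 (see dp_addba_resp_tx_completion_wifi3). A later 64-sized
	 * request then drops the window to 64 and flags the existing
	 * 256-sized sessions for teardown via peer->kill_256_sessions.
	 */
	dp_check_ba_buffersize(peer, 1, 64);
}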

/*
 * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @dialogtoken: dialogtoken from ADDBA frame
 * @tid: TID number
 * @batimeout: BA timeout
 * @buffersize: BA window size
 * @startseqnum: Start seq. number received in BA sequence control
 *
 * Return: 0 on success, error code on failure
 */
int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
				  uint8_t *peer_mac,
				  uint16_t vdev_id,
				  uint8_t dialogtoken,
				  uint16_t tid, uint16_t batimeout,
				  uint16_t buffersize,
				  uint16_t startseqnum)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_rx_tid *rx_tid = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
						      peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_req++;
	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
	     rx_tid->hw_qdesc_vaddr_unaligned)) {
		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		peer->active_ba_session_cnt--;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Rx Tid- %d hw qdesc is already setup",
			  __func__, tid);
	}

	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}
	dp_check_ba_buffersize(peer, tid, buffersize);

	if (dp_rx_tid_setup_wifi3(peer, tid,
				  rx_tid->ba_win_size, startseqnum)) {
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}
	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;

	rx_tid->dialogtoken = dialogtoken;
	rx_tid->startseqnum = startseqnum;

	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
		rx_tid->statuscode = rx_tid->userstatuscode;
	else
		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;

	qdf_spin_unlock_bh(&rx_tid->tid_lock);

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}
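
/*
 * Illustrative sketch (not part of the driver): the ADDBA handshake as
 * seen from a hypothetical control-path caller. Only the three wifi3
 * APIs below are real; the surrounding frame handling, the helper name
 * and the literal parameter values are assumptions.
 */
static void dp_example_addba_handshake(struct cdp_soc_t *cdp_soc,
				       uint8_t *peer_mac, uint16_t vdev_id)
{
	uint8_t dialogtoken;
	uint16_t statuscode, buffersize, batimeout;

	/* 1. An ADDBA request frame arrives: set up the REO queue and
	 * move the TID to DP_RX_BA_IN_PROGRESS.
	 */
	dp_addba_requestprocess_wifi3(cdp_soc, peer_mac, vdev_id,
				      /* dialogtoken */ 1, /* tid */ 0,
				      /* batimeout */ 0,
				      /* buffersize */ 256,
				      /* startseqnum */ 0);

	/* 2. Read back the negotiated parameters to build the ADDBA
	 * response frame.
	 */
	dp_addba_responsesetup_wifi3(cdp_soc, peer_mac, vdev_id, 0,
				     &dialogtoken, &statuscode,
				     &buffersize, &batimeout);

	/* 3. Tx completion of the response frame promotes the TID to
	 * DP_RX_BA_ACTIVE (status 0) or tears the setup down.
	 */
	dp_addba_resp_tx_completion_wifi3(cdp_soc, peer_mac, vdev_id, 0,
					  /* status */ 0);
}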

/*
 * dp_set_addba_response() - Set a user defined ADDBA response status code
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @statuscode: response status code to be set
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
{
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
						      peer_mac, 0, vdev_id);
	struct dp_rx_tid *rx_tid;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->userstatuscode = statuscode;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}

/*
 * dp_delba_process_wifi3() - Process DELBA from peer
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @reasoncode: Reason code received in DELBA frame
 *
 * Return: 0 on success, error code on failure
 */
int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
			   uint16_t vdev_id, int tid, uint16_t reasoncode)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_rx_tid *rx_tid;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
						      peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}
	/* TODO: See if we can delete the existing REO queue descriptor and
	 * replace with a new one without queue extension descriptor to save
	 * memory
	 */
	rx_tid->delba_rcode = reasoncode;
	rx_tid->num_of_delba_req++;
	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);

	rx_tid->ba_status = DP_RX_BA_INACTIVE;
	peer->active_ba_session_cnt--;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}

/*
 * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion status and
 *				    resend the DELBA if the tx failed
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @status: tx completion status
 *
 * Return: 0 on success, error code on failure
 */

int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
				 uint16_t vdev_id,
				 uint8_t tid, int status)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_rx_tid *rx_tid = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
						      peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!", __func__);
		ret = QDF_STATUS_E_FAILURE;
		goto end;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		rx_tid->delba_tx_fail_cnt++;
		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
			rx_tid->delba_tx_retry = 0;
			rx_tid->delba_tx_status = 0;
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
		} else {
			rx_tid->delba_tx_retry++;
			rx_tid->delba_tx_status = 1;
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw, tid,
					rx_tid->delba_rcode);
		}
		goto end;
	} else {
		rx_tid->delba_tx_success_cnt++;
		rx_tid->delba_tx_retry = 0;
		rx_tid->delba_tx_status = 0;
	}
	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		peer->active_ba_session_cnt--;
	}
	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
	}
	qdf_spin_unlock_bh(&rx_tid->tid_lock);

end:
	if (peer)
		dp_peer_unref_delete(peer);

	return ret;
}
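
/*
 * Illustrative sketch (not part of the driver): DELBA handling from a
 * hypothetical caller. A received DELBA tears the session down
 * immediately; a locally sent DELBA is finalized (or retried) from its
 * tx completion. The helper name and TID value are assumptions.
 */
static void dp_example_delba_flow(struct cdp_soc_t *cdp_soc,
				  uint8_t *peer_mac, uint16_t vdev_id)
{
	/* Peer sent us a DELBA: flush and deactivate TID 0 */
	dp_delba_process_wifi3(cdp_soc, peer_mac, vdev_id, /* tid */ 0,
			       IEEE80211_REASON_QOS_SETUP_REQUIRED);

	/* Our own DELBA frame completed: status != 0 triggers a resend
	 * until DP_MAX_DELBA_RETRY is hit; status == 0 clears the retry
	 * state and deactivates the TID.
	 */
	dp_delba_tx_completion_wifi3(cdp_soc, peer_mac, vdev_id,
				     /* tid */ 0, /* status */ 0);
}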

/**
 * dp_set_pn_check_wifi3() - enable PN check in REO for security
 * @soc: Datapath soc handle
 * @vdev_id: id of datapath vdev
 * @peer_mac: Datapath peer mac address
 * @sec_type: security type
 * @rx_pn: Receive pn starting number
 *
 * Return: QDF_STATUS
 */

QDF_STATUS
dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
		      uint32_t *rx_pn)
{
	struct dp_pdev *pdev;
	int i;
	uint8_t pn_size;
	struct hal_reo_cmd_params params;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      peer_mac, 0, vdev_id);
	struct dp_vdev *vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
						   vdev_id);

	if (!vdev || !peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	pdev = vdev->pdev;
	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 1;
	params.u.upd_queue_params.update_pn_valid = 1;
	params.u.upd_queue_params.update_pn_size = 1;
	params.u.upd_queue_params.update_pn = 1;
	params.u.upd_queue_params.update_pn_check_needed = 1;
	params.u.upd_queue_params.update_svld = 1;
	params.u.upd_queue_params.svld = 0;

	peer->security[dp_sec_ucast].sec_type = sec_type;

	switch (sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 48;
		pn_size = 48;
		break;
	case cdp_sec_type_wapi:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 128;
		pn_size = 128;
		if (vdev->opmode == wlan_op_mode_ap) {
			params.u.upd_queue_params.pn_even = 1;
			params.u.upd_queue_params.update_pn_even = 1;
		} else {
			params.u.upd_queue_params.pn_uneven = 1;
			params.u.upd_queue_params.update_pn_uneven = 1;
		}
		break;
	default:
		params.u.upd_queue_params.pn_check_needed = 0;
		pn_size = 0;
		break;
	}

	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (rx_tid->hw_qdesc_vaddr_unaligned) {
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (pn_size) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO_HIGH,
					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
					  __func__, i, rx_pn[3], rx_pn[2],
					  rx_pn[1], rx_pn[0]);
				params.u.upd_queue_params.update_pn_valid = 1;
				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
			}
			rx_tid->pn_size = pn_size;
			if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
					    CMD_UPDATE_RX_REO_QUEUE,
					    &params, dp_rx_tid_update_cb,
					    rx_tid)) {
				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE "
					   "tid %d desc %pK", rx_tid->tid,
					   (void *)(rx_tid->hw_qdesc_paddr));
				DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
					     rx.err.reo_cmd_send_fail, 1);
			}
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
				  "PN Check not setup for TID :%d ", i);
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}
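
/*
 * Illustrative sketch (not part of the driver): arming CCMP PN checking
 * for a peer with a zero starting PN. The 4 x 32-bit layout of rx_pn is
 * taken from the loop above (pn_31_0 .. pn_127_96); the helper name is
 * hypothetical.
 */
static void dp_example_enable_pn_check(struct cdp_soc_t *soc,
				       uint8_t vdev_id, uint8_t *peer_mac)
{
	uint32_t rx_pn[4] = { 0, 0, 0, 0 };

	if (dp_set_pn_check_wifi3(soc, vdev_id, peer_mac,
				  cdp_sec_type_aes_ccmp, rx_pn) !=
	    QDF_STATUS_SUCCESS)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "PN check setup failed");
}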

void
dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
		      enum cdp_sec_type sec_type, int is_unicast,
		      u_int32_t *michael_key,
		      u_int32_t *rx_pn)
{
	struct dp_peer *peer;
	int sec_index;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Couldn't find peer from ID %d - skipping security inits",
			  peer_id);
		return;
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "sec spec for peer %pK %pM: %s key of type %d",
		  peer,
		  peer->mac_addr.raw,
		  is_unicast ? "ucast" : "mcast",
		  sec_type);
	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;
#ifdef notyet /* TODO: See if this is required for defrag support */
	/* michael key only valid for TKIP, but for simplicity,
	 * copy it anyway
	 */
	qdf_mem_copy(
		&peer->security[sec_index].michael_key[0],
		michael_key,
		sizeof(peer->security[sec_index].michael_key));
#ifdef BIG_ENDIAN_HOST
	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
		     sizeof(peer->security[sec_index].michael_key));
#endif /* BIG_ENDIAN_HOST */
#endif

#ifdef notyet /* TODO: Check if this is required for wifi3.0 */
	if (sec_type != cdp_sec_type_wapi) {
		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
	} else {
		for (i = 0; i < DP_MAX_TIDS; i++) {
			/*
			 * Setting PN valid bit for WAPI sec_type,
			 * since WAPI PN has to be started with predefined value
			 */
			peer->tids_last_pn_valid[i] = 1;
			qdf_mem_copy(
				(u_int8_t *)&peer->tids_last_pn[i],
				(u_int8_t *)rx_pn, sizeof(union htt_rx_pn_t));
			peer->tids_last_pn[i].pn128[1] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
			peer->tids_last_pn[i].pn128[0] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
		}
	}
#endif
	/* TODO: Update HW TID queue with PN check parameters (pn type for
	 * all security types and last pn for WAPI) once REO command API
	 * is available
	 */

	dp_peer_unref_del_find_by_id(peer);
}

#ifdef DP_PEER_EXTENDED_API
QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			    struct ol_txrx_desc_type *sta_desc)
{
	struct dp_peer *peer;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	/* validate pdev before it is used for the peer lookup */
	if (!pdev)
		return QDF_STATUS_E_FAULT;

	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
				    sta_desc->peer_addr.bytes);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_CONN;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, false);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	      struct qdf_mac_addr peer_addr)
{
	struct dp_peer *peer;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAULT;

	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
 * @pdev_handle: data path device instance
 * @vdev_handle: virtual interface instance
 * @peer_addr: peer mac address
 *
 * Find peer by peer mac address within vdev
 *
 * Return: peer instance void pointer
 *	   NULL if the target peer cannot be found
 */
void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
				    struct cdp_vdev *vdev_handle,
				    uint8_t *peer_addr)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, DP_VDEV_ALL);

	if (!peer)
		return NULL;

	if (peer->vdev != vdev) {
		dp_peer_unref_delete(peer);
		return NULL;
	}

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	dp_peer_unref_delete(peer);

	return peer;
}

QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
				enum ol_txrx_peer_state state)
{
	struct dp_peer *peer;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Failed to find peer for: [%pM]", peer_mac);
		return QDF_STATUS_E_FAILURE;
	}
	peer->state = state;

	dp_info("peer %pK state %d", peer, peer->state);
	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	dp_peer_unref_delete(peer);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
			 uint8_t *vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer =
		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	dp_info("peer %pK vdev %pK vdev id %d",
		peer, peer->vdev, peer->vdev->vdev_id);
	*vdev_id = peer->vdev->vdev_id;
	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	dp_peer_unref_delete(peer);

	return QDF_STATUS_SUCCESS;
}

struct cdp_vdev *
dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
			 struct qdf_mac_addr peer_addr)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_peer *peer = NULL;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PDEV not found for peer_addr: %pM",
			  peer_addr.bytes);
		return NULL;
	}

	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Peer not found for peer_addr: %pM",
			  peer_addr.bytes);
		return NULL;
	}

	return (struct cdp_vdev *)peer->vdev;
}

/**
 * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
 * @peer_handle: peer instance
 *
 * Get virtual interface instance which peer belongs
 *
 * Return: virtual interface instance pointer
 *	   NULL if it cannot be found
 */
struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
	return (struct cdp_vdev *)peer->vdev;
}

/**
 * dp_peer_get_peer_mac_addr() - Get peer mac address
 * @peer_handle: peer instance
 *
 * Get peer mac address
 *
 * Return: peer mac address pointer
 *	   NULL if it cannot be found
 */
uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;
	uint8_t *mac;

	mac = peer->mac_addr.raw;
	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return peer->mac_addr.raw;
}

int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      uint8_t *peer_mac)
{
	enum ol_txrx_peer_state peer_state;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0,
						      vdev_id);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
	peer_state = peer->state;
	dp_peer_unref_delete(peer);

	return peer_state;
}

/**
 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
 * @pdev: data path device instance
 *
 * local peer id pool alloc for physical device
 *
 * Return: none
 */
void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "Peer pool init");
}

/**
 * dp_local_peer_id_alloc() - allocate local peer id
 * @pdev: data path device instance
 * @peer: new peer instance
 *
 * allocate local peer id
 *
 * Return: none
 */
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	dp_info("peer %pK, local id %d", peer, peer->local_id);
}

/**
 * dp_local_peer_id_free() - remove local peer id
 * @pdev: data path device instance
 * @peer: peer instance should be removed
 *
 * remove local peer id
 *
 * Return: none
 */
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}

	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}
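
/*
 * Illustrative sketch (not part of the driver): pairing of the local
 * peer id alloc/free calls around a peer's lifetime. The helper name is
 * hypothetical; a failed allocation leaves local_id at
 * OL_TXRX_INVALID_LOCAL_PEER_ID, as set in dp_local_peer_id_alloc().
 */
static void dp_example_local_peer_id(struct dp_pdev *pdev,
				     struct dp_peer *peer)
{
	dp_local_peer_id_alloc(pdev, peer);
	if (peer->local_id == OL_TXRX_INVALID_LOCAL_PEER_ID)
		return;	/* pool exhausted */

	/* ... while the peer lives, local_id indexes
	 * pdev->local_peer_ids.map ...
	 */

	dp_local_peer_id_free(pdev, peer);
}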

bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
				uint8_t vdev_id, uint8_t *peer_addr)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);

	if (!vdev)
		return false;

	return !!dp_find_peer_by_addr_and_vdev(
					dp_pdev_to_cdp_pdev(vdev->pdev),
					dp_vdev_to_cdp_vdev(vdev),
					peer_addr);
}

bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, uint8_t *peer_addr,
				      uint16_t max_bssid)
{
	int i;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	for (i = 0; i < max_bssid; i++) {
		vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, i);
		/* Need to check vdevs other than the vdev_id */
		if (vdev_id == i || !vdev)
			continue;
		if (dp_find_peer_by_addr_and_vdev(
					dp_pdev_to_cdp_pdev(vdev->pdev),
					dp_vdev_to_cdp_vdev(vdev),
					peer_addr)) {
			dp_err("%s: Duplicate peer %pM already exists on vdev %d",
			       __func__, peer_addr, i);
			return true;
		}
	}

	return false;
}

bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			uint8_t *peer_addr)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev)
		return false;

	return !!dp_find_peer_by_addr(dp_pdev_to_cdp_pdev(pdev), peer_addr);
}
#endif

/**
 * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: REO command callback function
 * @cb_ctxt: Callback context
 *
 * Return: count of tid stats commands sent successfully
 */
int dp_peer_rxtid_stats(struct dp_peer *peer,
			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
			void *cb_ctxt)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	int i;
	int stats_cmd_sent_cnt = 0;
	QDF_STATUS status;

	if (!dp_stats_cmd_cb)
		return stats_cmd_sent_cnt;

	qdf_mem_zero(&params, sizeof(params));
	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

		if (rx_tid->hw_qdesc_vaddr_unaligned) {
			params.std.need_status = 1;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (cb_ctxt) {
				status = dp_reo_send_cmd(
						soc, CMD_GET_QUEUE_STATS,
						&params, dp_stats_cmd_cb,
						cb_ctxt);
			} else {
				status = dp_reo_send_cmd(
						soc, CMD_GET_QUEUE_STATS,
						&params, dp_stats_cmd_cb,
						rx_tid);
			}

			if (QDF_IS_STATUS_SUCCESS(status))
				stats_cmd_sent_cnt++;

			/* Flush REO descriptor from HW cache to update stats
			 * in descriptor memory. This is to help debugging.
			 */
			qdf_mem_zero(&params, sizeof(params));
			params.std.need_status = 0;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			params.u.fl_cache_params.flush_no_inval = 1;
			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
					NULL);
		}
	}

	return stats_cmd_sent_cnt;
}
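
/*
 * Illustrative sketch (not part of the driver): requesting REO queue
 * stats for every TID of a peer. The callback prototype below is an
 * assumption: it is written to match how dp_stats_cmd_cb is handed
 * straight to dp_reo_send_cmd() above, i.e. the (soc, context,
 * REO status) shape used by dp_rx_tid_update_cb; the authoritative
 * typedef for dp_rxtid_stats_cmd_cb lives in the header.
 */
static void dp_example_rxtid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
				      union hal_reo_status *reo_status)
{
	/* inspect the REO status returned for the queue here */
}

static void dp_example_dump_rxtid_stats(struct dp_peer *peer)
{
	int sent = dp_peer_rxtid_stats(peer, dp_example_rxtid_stats_cb, NULL);

	dp_info("queued %d GET_QUEUE_STATS commands", sent);
}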

QDF_STATUS
dp_set_michael_key(struct cdp_soc_t *soc,
		   uint8_t vdev_id,
		   uint8_t *peer_mac,
		   bool is_unicast, uint32_t *key)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint8_t sec_index = is_unicast ? 1 : 0;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found ");
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
		     key, IEEE80211_WEP_MICLEN);

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}
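
/*
 * Illustrative sketch (not part of the driver): installing a TKIP
 * Michael key for unicast traffic. The helper name and dummy key words
 * are hypothetical; IEEE80211_WEP_MICLEN bytes are copied, which is
 * assumed to be the conventional 8-byte MIC key (two 32-bit words).
 */
static void dp_example_set_michael_key(struct cdp_soc_t *soc,
				       uint8_t vdev_id, uint8_t *peer_mac)
{
	uint32_t mic_key[2] = { 0x01020304, 0x05060708 };	/* dummy key */

	if (dp_set_michael_key(soc, vdev_id, peer_mac,
			       /* is_unicast */ true, mic_key) !=
	    QDF_STATUS_SUCCESS)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "michael key install failed");
}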

bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
{
	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);

	if (peer) {
		/*
		 * Decrement the peer ref which is taken as part of
		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
		 */
		dp_peer_unref_del_find_by_id(peer);

		return true;
	}

	return false;
}