blob: 48974e968afb0c452468ae8fade075700670db4f [file] [log] [blame]
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001/*
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05302 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
Harilakshmi Deshkumar1ea21092017-05-08 21:16:27 +053016 * PERFORMANCE OF THIS SOFTWARE.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070017 */
18
19#include <qdf_types.h>
20#include <qdf_lock.h>
Balamurugan Mahalingamf72cb1f2018-06-25 12:18:34 +053021#include <hal_hw_headers.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070022#include "dp_htt.h"
23#include "dp_types.h"
24#include "dp_internal.h"
Jeff Johnson2cb8fc72016-12-17 10:45:08 -080025#include "dp_peer.h"
Lin Baif1c577e2018-05-22 20:45:42 +080026#include "dp_rx_defrag.h"
Sravan Kumar Kairamebd627e2018-08-28 23:32:52 +053027#include "dp_rx.h"
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070028#include <hal_api.h>
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -080029#include <hal_reo.h>
Venkata Sharath Chandra Manchala8e8d8f12017-01-13 00:00:58 -080030#ifdef CONFIG_MCL
31#include <cds_ieee80211_common.h>
Yun Parkfde6b9e2017-06-26 17:13:11 -070032#include <cds_api.h>
Venkata Sharath Chandra Manchala8e8d8f12017-01-13 00:00:58 -080033#endif
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080034#include <cdp_txrx_handle.h>
Ravi Joshiaf9ace82017-02-17 12:41:48 -080035#include <wlan_cfg.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070036
nobeljdebe2b32019-04-23 11:18:47 -070037#ifdef WLAN_TX_PKT_CAPTURE_ENH
38#include "dp_tx_capture.h"
39#endif
40
Pramod Simhab17d0672017-03-06 17:20:13 -080041#ifdef DP_LFR
42static inline void
43dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
44 uint8_t valid)
45{
46 params->u.upd_queue_params.update_svld = 1;
47 params->u.upd_queue_params.svld = valid;
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +053048 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
49 "%s: Setting SSN valid bit to %d",
50 __func__, valid);
Pramod Simhab17d0672017-03-06 17:20:13 -080051}
52#else
53static inline void
54dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
55 uint8_t valid) {};
56#endif
57
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070058static inline int dp_peer_find_mac_addr_cmp(
59 union dp_align_mac_addr *mac_addr1,
60 union dp_align_mac_addr *mac_addr2)
61{
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070062 /*
63 * Intentionally use & rather than &&.
64 * because the operands are binary rather than generic boolean,
65 * the functionality is equivalent.
66 * Using && has the advantage of short-circuited evaluation,
67 * but using & has the advantage of no conditional branching,
68 * which is a more significant benefit.
69 */
Amir Patelcb990262019-05-28 15:12:48 +053070 return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
71 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070072}
73
Tallapragada Kalyanc7413082019-03-07 21:22:10 +053074static int dp_peer_ast_table_attach(struct dp_soc *soc)
75{
76 uint32_t max_ast_index;
77
78 max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
79 /* allocate ast_table for ast entry to ast_index map */
80 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
81 "\n<=== cfg max ast idx %d ====>", max_ast_index);
82 soc->ast_table = qdf_mem_malloc(max_ast_index *
83 sizeof(struct dp_ast_entry *));
84 if (!soc->ast_table) {
85 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
86 "%s: ast_table memory allocation failed", __func__);
87 return QDF_STATUS_E_NOMEM;
88 }
89 return 0; /* success */
90}
91
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070092static int dp_peer_find_map_attach(struct dp_soc *soc)
93{
94 uint32_t max_peers, peer_map_size;
95
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +053096 max_peers = soc->max_peers;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070097 /* allocate the peer ID -> peer object map */
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +053098 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
99 "\n<=== cfg max peer id %d ====>", max_peers);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700100 peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
101 soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
102 if (!soc->peer_id_to_obj_map) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +0530103 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
104 "%s: peer map memory allocation failed", __func__);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700105 return QDF_STATUS_E_NOMEM;
106 }
107
108 /*
109 * The peer_id_to_obj_map doesn't really need to be initialized,
110 * since elements are only used after they have been individually
111 * initialized.
112 * However, it is convenient for debugging to have all elements
113 * that are not in use set to 0.
114 */
115 qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700116 return 0; /* success */
117}
118
/*
 * dp_log2_ceil() - compute ceil(log2(value))
 * @value: input value
 *
 * Return: smallest k such that (1 << k) >= value; 0 for value 0 or 1.
 */
static int dp_log2_ceil(unsigned int value)
{
	unsigned int tmp = value;
	int log2 = -1;

	/*
	 * Guard the zero case: without it the loop never runs, log2
	 * stays -1, and the (1U << log2) below would be a shift by a
	 * negative count — undefined behavior.
	 */
	if (!value)
		return 0;

	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	/*
	 * Round up when value is not an exact power of two.  Shift an
	 * unsigned 1 so log2 == 31 (value >= 2^31) does not overflow
	 * a signed int.
	 */
	if ((1U << log2) != value)
		log2++;

	return log2;
}
132
133static int dp_peer_find_add_id_to_obj(
134 struct dp_peer *peer,
135 uint16_t peer_id)
136{
137 int i;
138
139 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
140 if (peer->peer_ids[i] == HTT_INVALID_PEER) {
141 peer->peer_ids[i] = peer_id;
142 return 0; /* success */
143 }
144 }
145 return QDF_STATUS_E_FAILURE; /* failure */
146}
147
148#define DP_PEER_HASH_LOAD_MULT 2
149#define DP_PEER_HASH_LOAD_SHIFT 0
150
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530151#define DP_AST_HASH_LOAD_MULT 2
152#define DP_AST_HASH_LOAD_SHIFT 0
153
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700154static int dp_peer_find_hash_attach(struct dp_soc *soc)
155{
156 int i, hash_elems, log2;
157
158 /* allocate the peer MAC address -> peer object hash table */
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +0530159 hash_elems = soc->max_peers;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700160 hash_elems *= DP_PEER_HASH_LOAD_MULT;
161 hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
162 log2 = dp_log2_ceil(hash_elems);
163 hash_elems = 1 << log2;
164
165 soc->peer_hash.mask = hash_elems - 1;
166 soc->peer_hash.idx_bits = log2;
167 /* allocate an array of TAILQ peer object lists */
168 soc->peer_hash.bins = qdf_mem_malloc(
169 hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
170 if (!soc->peer_hash.bins)
171 return QDF_STATUS_E_NOMEM;
172
173 for (i = 0; i < hash_elems; i++)
174 TAILQ_INIT(&soc->peer_hash.bins[i]);
175
176 return 0;
177}
178
179static void dp_peer_find_hash_detach(struct dp_soc *soc)
180{
phadimanb1007502019-04-03 15:21:53 +0530181 if (soc->peer_hash.bins) {
182 qdf_mem_free(soc->peer_hash.bins);
183 soc->peer_hash.bins = NULL;
184 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700185}
186
187static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
188 union dp_align_mac_addr *mac_addr)
189{
190 unsigned index;
191
192 index =
193 mac_addr->align2.bytes_ab ^
194 mac_addr->align2.bytes_cd ^
195 mac_addr->align2.bytes_ef;
196 index ^= index >> soc->peer_hash.idx_bits;
197 index &= soc->peer_hash.mask;
198 return index;
199}
200
201
202void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
203{
204 unsigned index;
205
206 index = dp_peer_find_hash_index(soc, &peer->mac_addr);
207 qdf_spin_lock_bh(&soc->peer_ref_mutex);
208 /*
209 * It is important to add the new peer at the tail of the peer list
210 * with the bin index. Together with having the hash_find function
211 * search from head to tail, this ensures that if two entries with
212 * the same MAC address are stored, the one added first will be
213 * found first.
214 */
215 TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
216 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
217}
218
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +0530219#ifdef FEATURE_AST
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530220/*
221 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
222 * @soc: SoC handle
223 *
224 * Return: None
225 */
226static int dp_peer_ast_hash_attach(struct dp_soc *soc)
227{
228 int i, hash_elems, log2;
Tallapragada Kalyanc7413082019-03-07 21:22:10 +0530229 unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530230
Tallapragada Kalyanc7413082019-03-07 21:22:10 +0530231 hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530232 DP_AST_HASH_LOAD_SHIFT);
233
234 log2 = dp_log2_ceil(hash_elems);
235 hash_elems = 1 << log2;
236
237 soc->ast_hash.mask = hash_elems - 1;
238 soc->ast_hash.idx_bits = log2;
239
Tallapragada Kalyanc7413082019-03-07 21:22:10 +0530240 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
241 "ast hash_elems: %d, max_ast_idx: %d",
242 hash_elems, max_ast_idx);
243
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530244 /* allocate an array of TAILQ peer object lists */
245 soc->ast_hash.bins = qdf_mem_malloc(
246 hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
247 dp_ast_entry)));
248
249 if (!soc->ast_hash.bins)
250 return QDF_STATUS_E_NOMEM;
251
252 for (i = 0; i < hash_elems; i++)
253 TAILQ_INIT(&soc->ast_hash.bins[i]);
254
255 return 0;
256}
257
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530258/*
259 * dp_peer_ast_cleanup() - cleanup the references
260 * @soc: SoC handle
261 * @ast: ast entry
262 *
263 * Return: None
264 */
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530265static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
266 struct dp_ast_entry *ast)
267{
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530268 txrx_ast_free_cb cb = ast->callback;
269 void *cookie = ast->cookie;
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530270
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530271 /* Call the callbacks to free up the cookie */
272 if (cb) {
273 ast->callback = NULL;
274 ast->cookie = NULL;
275 cb(soc->ctrl_psoc,
276 soc,
277 cookie,
278 CDP_TXRX_AST_DELETE_IN_PROGRESS);
279 }
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530280}
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530281
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530282/*
283 * dp_peer_ast_hash_detach() - Free AST Hash table
284 * @soc: SoC handle
285 *
286 * Return: None
287 */
288static void dp_peer_ast_hash_detach(struct dp_soc *soc)
289{
Chaithanya Garrepalli157543d2018-07-09 17:42:59 +0530290 unsigned int index;
291 struct dp_ast_entry *ast, *ast_next;
292
293 if (!soc->ast_hash.mask)
294 return;
295
phadimanb1007502019-04-03 15:21:53 +0530296 if (!soc->ast_hash.bins)
297 return;
298
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530299 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepalli157543d2018-07-09 17:42:59 +0530300 for (index = 0; index <= soc->ast_hash.mask; index++) {
301 if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
302 TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
303 hash_list_elem, ast_next) {
304 TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
305 hash_list_elem);
306 dp_peer_ast_cleanup(soc, ast);
307 qdf_mem_free(ast);
308 }
309 }
310 }
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530311 qdf_spin_unlock_bh(&soc->ast_lock);
Chaithanya Garrepalli157543d2018-07-09 17:42:59 +0530312
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530313 qdf_mem_free(soc->ast_hash.bins);
phadimanb1007502019-04-03 15:21:53 +0530314 soc->ast_hash.bins = NULL;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530315}
316
317/*
318 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
319 * @soc: SoC handle
320 *
321 * Return: AST hash
322 */
323static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
324 union dp_align_mac_addr *mac_addr)
325{
326 uint32_t index;
327
328 index =
329 mac_addr->align2.bytes_ab ^
330 mac_addr->align2.bytes_cd ^
331 mac_addr->align2.bytes_ef;
332 index ^= index >> soc->ast_hash.idx_bits;
333 index &= soc->ast_hash.mask;
334 return index;
335}
336
337/*
338 * dp_peer_ast_hash_add() - Add AST entry into hash table
339 * @soc: SoC handle
340 *
341 * This function adds the AST entry into SoC AST hash table
342 * It assumes caller has taken the ast lock to protect the access to this table
343 *
344 * Return: None
345 */
346static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
347 struct dp_ast_entry *ase)
348{
349 uint32_t index;
350
351 index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
352 TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
353}
354
355/*
356 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
357 * @soc: SoC handle
358 *
359 * This function removes the AST entry from soc AST hash table
360 * It assumes caller has taken the ast lock to protect the access to this table
361 *
362 * Return: None
363 */
Pavankumar Nandeshwar1ab908e2019-01-24 12:53:13 +0530364void dp_peer_ast_hash_remove(struct dp_soc *soc,
365 struct dp_ast_entry *ase)
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530366{
367 unsigned index;
368 struct dp_ast_entry *tmpase;
369 int found = 0;
370
371 index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
372 /* Check if tail is not empty before delete*/
373 QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
374
375 TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
376 if (tmpase == ase) {
377 found = 1;
378 break;
379 }
380 }
381
382 QDF_ASSERT(found);
383 TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
384}
385
386/*
Chaithanya Garrepalli7c8cf122018-09-07 19:23:52 +0530387 * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
388 * @soc: SoC handle
389 * @peer: peer handle
390 * @ast_mac_addr: mac address
391 *
392 * It assumes caller has taken the ast lock to protect the access to ast list
393 *
394 * Return: AST entry
395 */
396struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
397 struct dp_peer *peer,
398 uint8_t *ast_mac_addr)
399{
400 struct dp_ast_entry *ast_entry = NULL;
401 union dp_align_mac_addr *mac_addr =
402 (union dp_align_mac_addr *)ast_mac_addr;
403
404 TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
405 if (!dp_peer_find_mac_addr_cmp(mac_addr,
406 &ast_entry->mac_addr)) {
407 return ast_entry;
408 }
409 }
410
411 return NULL;
412}
413
414/*
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530415 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530416 * @soc: SoC handle
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530417 *
418 * It assumes caller has taken the ast lock to protect the access to
419 * AST hash table
420 *
421 * Return: AST entry
422 */
423struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
424 uint8_t *ast_mac_addr,
425 uint8_t pdev_id)
426{
427 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
428 uint32_t index;
429 struct dp_ast_entry *ase;
430
431 qdf_mem_copy(&local_mac_addr_aligned.raw[0],
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -0800432 ast_mac_addr, QDF_MAC_ADDR_SIZE);
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530433 mac_addr = &local_mac_addr_aligned;
434
435 index = dp_peer_ast_hash_index(soc, mac_addr);
436 TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
437 if ((pdev_id == ase->pdev_id) &&
438 !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
439 return ase;
440 }
441 }
442
443 return NULL;
444}
445
446/*
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530447 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530448 * @soc: SoC handle
449 *
450 * It assumes caller has taken the ast lock to protect the access to
451 * AST hash table
452 *
453 * Return: AST entry
454 */
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530455struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
456 uint8_t *ast_mac_addr)
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530457{
458 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
459 unsigned index;
460 struct dp_ast_entry *ase;
461
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530462 qdf_mem_copy(&local_mac_addr_aligned.raw[0],
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -0800463 ast_mac_addr, QDF_MAC_ADDR_SIZE);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530464 mac_addr = &local_mac_addr_aligned;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530465
466 index = dp_peer_ast_hash_index(soc, mac_addr);
467 TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
468 if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
469 return ase;
470 }
471 }
472
473 return NULL;
474}
475
/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 * @ast_hash: ast hash value in HW
 *
 * Looks up the AST entry for mac_addr on the peer's AST list and, when
 * found, records the HW index/hash on it and publishes it in
 * soc->ast_table.  Then notifies the control path via the
 * peer_map_event op (also done for proxy-STA vdevs even without a
 * matching entry).
 *
 * Return: None
 */
static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id, uint16_t ast_hash)
{
	struct dp_ast_entry *ast_entry = NULL;
	/* reported to the peer_map_event op when no entry is found */
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;

	if (!peer) {
		return;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
		  __func__, peer, hw_peer_id, vdev_id, mac_addr[0],
		  mac_addr[1], mac_addr[2], mac_addr[3],
		  mac_addr[4], mac_addr[5]);

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);

	if (ast_entry) {
		/* bind the entry to the HW index and mark it live */
		ast_entry->ast_idx = hw_peer_id;
		soc->ast_table[hw_peer_id] = ast_entry;
		ast_entry->is_active = TRUE;
		peer_type = ast_entry->type;
		ast_entry->ast_hash_value = ast_hash;
		ast_entry->is_mapped = TRUE;
	}

	/* notify control path; proxy-STA vdevs are notified regardless */
	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
			soc->ctrl_psoc, peer->peer_ids[0],
			hw_peer_id, vdev_id,
			mac_addr, peer_type, ast_hash);
		}
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "AST entry not found");
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return;
}
532
/*
 * dp_peer_free_hmwds_cb() - AST free callback that re-adds an HMWDS entry
 * @ctrl_psoc: control psoc handle (unused here)
 * @dp_soc: opaque dp_soc pointer
 * @cookie: dp_ast_free_cb_params allocated by the requester
 * @status: AST free status reported by the deleting path
 *
 * Once the old AST entry's deletion is confirmed (CDP_TXRX_AST_DELETED),
 * looks up the peer recorded in the cookie and re-creates the AST entry
 * with the saved mac/type/flags.  The cookie is freed on every path.
 *
 * Return: None
 */
void dp_peer_free_hmwds_cb(void *ctrl_psoc,
			   void *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status)
{
	struct dp_ast_free_cb_params *param =
		(struct dp_ast_free_cb_params *)cookie;
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_peer *peer = NULL;

	/* deletion not confirmed: nothing to re-add, just drop the cookie */
	if (status != CDP_TXRX_AST_DELETED) {
		qdf_mem_free(cookie);
		return;
	}

	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
				      0, param->vdev_id);
	if (peer) {
		dp_peer_add_ast(soc, peer,
				&param->mac_addr.raw[0],
				param->type,
				param->flags);
		/* release the reference taken by dp_peer_find_hash_find */
		dp_peer_unref_delete(peer);
	}
	qdf_mem_free(cookie);
}
Kiran Venkatappa74e6d8b2018-11-05 15:02:29 +0530559
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530560/*
561 * dp_peer_add_ast() - Allocate and add AST entry into peer list
562 * @soc: SoC handle
563 * @peer: peer to which ast node belongs
564 * @mac_addr: MAC address of ast node
565 * @is_self: Is this base AST entry with peer mac address
566 *
Jeff Johnsonbd6e61f2018-05-06 17:11:15 -0700567 * This API is used by WDS source port learning function to
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530568 * add a new AST entry into peer AST list
569 *
570 * Return: 0 if new entry is allocated,
Ruchi, Agrawald6ba7ae2018-02-23 16:54:58 +0530571 * -1 if entry add failed
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530572 */
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530573int dp_peer_add_ast(struct dp_soc *soc,
574 struct dp_peer *peer,
575 uint8_t *mac_addr,
576 enum cdp_txrx_ast_entry_type type,
577 uint32_t flags)
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530578{
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530579 struct dp_ast_entry *ast_entry = NULL;
Ruchi, Agrawal93bcf122018-10-26 13:56:34 +0530580 struct dp_vdev *vdev = NULL;
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530581 struct dp_pdev *pdev = NULL;
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530582 uint8_t next_node_mac[6];
Ruchi, Agrawald6ba7ae2018-02-23 16:54:58 +0530583 int ret = -1;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530584 txrx_ast_free_cb cb = NULL;
585 void *cookie = NULL;
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530586
Chaithanya Garrepalli8fb48772019-01-21 23:11:18 +0530587 qdf_spin_lock_bh(&soc->ast_lock);
588 if (peer->delete_in_progress) {
589 qdf_spin_unlock_bh(&soc->ast_lock);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530590 return ret;
Chaithanya Garrepalli8fb48772019-01-21 23:11:18 +0530591 }
Ruchi, Agrawal93bcf122018-10-26 13:56:34 +0530592
593 vdev = peer->vdev;
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530594 if (!vdev) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +0530595 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
596 FL("Peers vdev is NULL"));
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530597 QDF_ASSERT(0);
Chaithanya Garrepalli8fb48772019-01-21 23:11:18 +0530598 qdf_spin_unlock_bh(&soc->ast_lock);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530599 return ret;
600 }
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530601
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530602 pdev = vdev->pdev;
603
phadimand2e88e32019-01-23 12:58:43 +0530604 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
phadimane9fb5472018-10-30 16:53:05 +0530605 "%s: pdevid: %u vdev: %u ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
606 __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
607 peer->mac_addr.raw, peer, mac_addr);
608
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530609
Tallapragada Kalyana7023622018-12-03 19:29:52 +0530610 /* fw supports only 2 times the max_peers ast entries */
611 if (soc->num_ast_entries >=
612 wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
613 qdf_spin_unlock_bh(&soc->ast_lock);
614 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
615 FL("Max ast entries reached"));
616 return ret;
617 }
618
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530619 /* If AST entry already exists , just return from here
620 * ast entry with same mac address can exist on different radios
621 * if ast_override support is enabled use search by pdev in this
622 * case
623 */
624 if (soc->ast_override_support) {
625 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
626 pdev->pdev_id);
627 if (ast_entry) {
Tallapragada Kalyan9e4b36f2019-05-02 13:22:34 +0530628 if ((type == CDP_TXRX_AST_TYPE_MEC) &&
629 (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
630 ast_entry->is_active = TRUE;
631
Pamidipati, Vijay13f5ec22018-08-06 17:34:21 +0530632 qdf_spin_unlock_bh(&soc->ast_lock);
633 return 0;
634 }
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530635 } else {
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530636 /* For HWMWDS_SEC entries can be added for same mac address
637 * do not check for existing entry
638 */
639 if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
640 goto add_ast_entry;
641
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530642 ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
Pamidipati, Vijay13f5ec22018-08-06 17:34:21 +0530643
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530644 if (ast_entry) {
Pamidipati, Vijayb113bbc2019-01-22 22:06:36 +0530645 if ((type == CDP_TXRX_AST_TYPE_MEC) &&
646 (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530647 ast_entry->is_active = TRUE;
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530648
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530649 if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
650 !ast_entry->delete_in_progress) {
651 qdf_spin_unlock_bh(&soc->ast_lock);
652 return 0;
653 }
654
655 /* Add for HMWDS entry we cannot be ignored if there
656 * is AST entry with same mac address
657 *
658 * if ast entry exists with the requested mac address
659 * send a delete command and register callback which
660 * can take care of adding HMWDS ast enty on delete
661 * confirmation from target
662 */
663 if ((type == CDP_TXRX_AST_TYPE_WDS_HM) &&
664 soc->is_peer_map_unmap_v2) {
665 struct dp_ast_free_cb_params *param = NULL;
666
667 if (ast_entry->type ==
668 CDP_TXRX_AST_TYPE_WDS_HM_SEC)
669 goto add_ast_entry;
670
671 /* save existing callback */
672 if (ast_entry->callback) {
673 cb = ast_entry->callback;
674 cookie = ast_entry->cookie;
675 }
676
677 param = qdf_mem_malloc(sizeof(*param));
678 if (!param) {
679 QDF_TRACE(QDF_MODULE_ID_TXRX,
680 QDF_TRACE_LEVEL_ERROR,
681 "Allocation failed");
682 qdf_spin_unlock_bh(&soc->ast_lock);
683 return ret;
684 }
685
686 qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -0800687 QDF_MAC_ADDR_SIZE);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530688 qdf_mem_copy(&param->peer_mac_addr.raw[0],
689 &peer->mac_addr.raw[0],
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -0800690 QDF_MAC_ADDR_SIZE);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530691 param->type = type;
692 param->flags = flags;
693 param->vdev_id = vdev->vdev_id;
694 ast_entry->callback = dp_peer_free_hmwds_cb;
Chaithanya Garrepalli4fd2fe42019-02-19 23:48:21 +0530695 ast_entry->pdev_id = vdev->pdev->pdev_id;
696 ast_entry->type = type;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530697 ast_entry->cookie = (void *)param;
698 if (!ast_entry->delete_in_progress)
699 dp_peer_del_ast(soc, ast_entry);
700 }
701
Sathyanarayanan Esakkiappan4af55842018-10-23 12:58:07 +0530702 /* Modify an already existing AST entry from type
703 * WDS to MEC on promption. This serves as a fix when
704 * backbone of interfaces are interchanged wherein
Nandha Kishore Easwaran8dd440d2018-11-30 15:02:20 +0530705 * wds entr becomes its own MEC. The entry should be
706 * replaced only when the ast_entry peer matches the
707 * peer received in mec event. This additional check
708 * is needed in wds repeater cases where a multicast
709 * packet from station to the root via the repeater
710 * should not remove the wds entry.
Sathyanarayanan Esakkiappan4af55842018-10-23 12:58:07 +0530711 */
712 if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
Nandha Kishore Easwaran8dd440d2018-11-30 15:02:20 +0530713 (type == CDP_TXRX_AST_TYPE_MEC) &&
714 (ast_entry->peer == peer)) {
Sathyanarayanan Esakkiappan4af55842018-10-23 12:58:07 +0530715 ast_entry->is_active = FALSE;
716 dp_peer_del_ast(soc, ast_entry);
717 }
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530718 qdf_spin_unlock_bh(&soc->ast_lock);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530719
720 /* Call the saved callback*/
721 if (cb) {
722 cb(soc->ctrl_psoc, soc, cookie,
723 CDP_TXRX_AST_DELETE_IN_PROGRESS);
724 }
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530725 return 0;
Pamidipati, Vijay13f5ec22018-08-06 17:34:21 +0530726 }
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530727 }
728
Tallapragada Kalyan5e3a39c2018-08-24 16:34:12 +0530729add_ast_entry:
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530730 ast_entry = (struct dp_ast_entry *)
731 qdf_mem_malloc(sizeof(struct dp_ast_entry));
732
733 if (!ast_entry) {
734 qdf_spin_unlock_bh(&soc->ast_lock);
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +0530735 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
736 FL("fail to allocate ast_entry"));
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530737 QDF_ASSERT(0);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530738 return ret;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530739 }
740
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -0800741 qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530742 ast_entry->pdev_id = vdev->pdev->pdev_id;
743 ast_entry->vdev_id = vdev->vdev_id;
Chaithanya Garrepallie10f87b2018-10-18 00:14:11 +0530744 ast_entry->is_mapped = false;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530745 ast_entry->delete_in_progress = false;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530746
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530747 switch (type) {
748 case CDP_TXRX_AST_TYPE_STATIC:
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530749 peer->self_ast_entry = ast_entry;
Radha krishna Simha Jiguruf70f9912017-08-02 18:32:22 +0530750 ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
Radha krishna Simha Jiguru27340792018-09-06 15:08:12 +0530751 if (peer->vdev->opmode == wlan_op_mode_sta)
752 ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +0530753 break;
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530754 case CDP_TXRX_AST_TYPE_SELF:
755 peer->self_ast_entry = ast_entry;
756 ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
757 break;
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530758 case CDP_TXRX_AST_TYPE_WDS:
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530759 ast_entry->next_hop = 1;
Radha krishna Simha Jiguruf70f9912017-08-02 18:32:22 +0530760 ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +0530761 break;
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530762 case CDP_TXRX_AST_TYPE_WDS_HM:
763 ast_entry->next_hop = 1;
764 ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
765 break;
Tallapragada Kalyan5e3a39c2018-08-24 16:34:12 +0530766 case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
767 ast_entry->next_hop = 1;
768 ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
769 break;
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530770 case CDP_TXRX_AST_TYPE_MEC:
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +0530771 ast_entry->next_hop = 1;
Radha krishna Simha Jiguruf70f9912017-08-02 18:32:22 +0530772 ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +0530773 break;
Tallapragada Kalyan2ae71e02018-08-31 19:30:54 +0530774 case CDP_TXRX_AST_TYPE_DA:
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530775 peer = peer->vdev->vap_bss_peer;
Tallapragada Kalyan2ae71e02018-08-31 19:30:54 +0530776 ast_entry->next_hop = 1;
777 ast_entry->type = CDP_TXRX_AST_TYPE_DA;
778 break;
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +0530779 default:
780 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
781 FL("Incorrect AST entry type"));
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530782 }
783
784 ast_entry->is_active = TRUE;
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530785 DP_STATS_INC(soc, ast.added, 1);
Tallapragada Kalyana7023622018-12-03 19:29:52 +0530786 soc->num_ast_entries++;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530787 dp_peer_ast_hash_add(soc, ast_entry);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530788
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530789 ast_entry->peer = peer;
790
791 if (type == CDP_TXRX_AST_TYPE_MEC)
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530792 qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
Ruchi, Agrawald536f882018-03-02 15:51:23 +0530793 else
794 qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530795
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530796 TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530797
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530798 if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
Radha krishna Simha Jiguru27340792018-09-06 15:08:12 +0530799 (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
Tallapragada Kalyan5e3a39c2018-08-24 16:34:12 +0530800 (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
801 (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
Ruchi, Agrawald6ba7ae2018-02-23 16:54:58 +0530802 if (QDF_STATUS_SUCCESS ==
803 soc->cdp_soc.ol_ops->peer_add_wds_entry(
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530804 peer->vdev->osif_vdev,
syed touqeer pasha0050ec92018-10-14 19:36:15 +0530805 (struct cdp_peer *)peer,
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530806 mac_addr,
807 next_node_mac,
Chaithanya Garrepalli58e7c5e2019-04-02 16:55:16 +0530808 flags)) {
809 qdf_spin_unlock_bh(&soc->ast_lock);
Ruchi, Agrawald6ba7ae2018-02-23 16:54:58 +0530810 return 0;
Chaithanya Garrepalli58e7c5e2019-04-02 16:55:16 +0530811 }
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530812 }
813
Chaithanya Garrepalli58e7c5e2019-04-02 16:55:16 +0530814 qdf_spin_unlock_bh(&soc->ast_lock);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530815 return ret;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530816}
817
/*
 * dp_peer_del_ast() - Delete and free AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function removes the AST entry from peer and soc tables
 * It assumes caller has taken the ast lock to protect the access to these
 * tables
 *
 * Return: None
 */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer;

	if (!ast_entry)
		return;

	peer = ast_entry->peer;

	/* Notify target first; this also marks delete_in_progress on the
	 * entry so concurrent update paths back off.
	 */
	dp_peer_ast_send_wds_del(soc, ast_entry);

	/*
	 * release the reference only if it is mapped
	 * to ast_table
	 */
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	/*
	 * if peer map v2 is enabled we are not freeing ast entry
	 * here and it is supposed to be freed in unmap event (after
	 * we receive delete confirmation from target)
	 *
	 * if peer_id is invalid we did not get the peer map event
	 * for the peer free ast entry from here only in this case
	 */
	if (soc->is_peer_map_unmap_v2) {

		/*
		 * For HM_SEC and SELF type we do not receive unmap event
		 * free ast_entry from here it self
		 */
		if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
			return;
	}

	/* SELF and STATIC entries are removed in teardown itself */
	if (ast_entry->next_hop)
		TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);
	dp_peer_ast_cleanup(soc, ast_entry);
	qdf_mem_free(ast_entry);
	soc->num_ast_entries--;
}
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530876
877/*
878 * dp_peer_update_ast() - Delete and free AST entry
879 * @soc: SoC handle
880 * @peer: peer to which ast node belongs
881 * @ast_entry: AST entry of the node
882 * @flags: wds or hmwds
883 *
884 * This function update the AST entry to the roamed peer and soc tables
885 * It assumes caller has taken the ast lock to protect the access to these
886 * tables
887 *
888 * Return: 0 if ast entry is updated successfully
Ruchi, Agrawald6ba7ae2018-02-23 16:54:58 +0530889 * -1 failure
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530890 */
891int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
892 struct dp_ast_entry *ast_entry, uint32_t flags)
893{
Ruchi, Agrawald6ba7ae2018-02-23 16:54:58 +0530894 int ret = -1;
Tallapragada Kalyan7a47aac2018-02-28 22:01:59 +0530895 struct dp_peer *old_peer;
896
phadimand2e88e32019-01-23 12:58:43 +0530897 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
phadimane9fb5472018-10-30 16:53:05 +0530898 "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
899 __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
900 peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
901 peer->mac_addr.raw);
902
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530903 if (ast_entry->delete_in_progress)
904 return ret;
905
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530906 if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
Tallapragada Kalyan5e3a39c2018-08-24 16:34:12 +0530907 (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
908 (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
909 (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530910 return 0;
Chaithanya Garrepalli4c7099f2018-03-23 12:20:18 +0530911
syed touqeer pasha8a0928b2019-03-01 18:06:50 +0530912 /*
913 * Avoids flood of WMI update messages sent to FW for same peer.
914 */
915 if (qdf_unlikely(ast_entry->peer == peer) &&
916 (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
917 (ast_entry->vdev_id == peer->vdev->vdev_id) &&
918 (ast_entry->is_active))
919 return 0;
920
Tallapragada Kalyan7a47aac2018-02-28 22:01:59 +0530921 old_peer = ast_entry->peer;
922 TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530923
924 ast_entry->peer = peer;
Tallapragada Kalyan7a47aac2018-02-28 22:01:59 +0530925 ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
926 ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
927 ast_entry->vdev_id = peer->vdev->vdev_id;
928 ast_entry->is_active = TRUE;
929 TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
930
Pamidipati, Vijayd578db12018-04-09 23:03:12 +0530931 ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530932 peer->vdev->osif_vdev,
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530933 ast_entry->mac_addr.raw,
934 peer->mac_addr.raw,
Pamidipati, Vijayd578db12018-04-09 23:03:12 +0530935 flags);
Chaithanya Garrepalli4c7099f2018-03-23 12:20:18 +0530936
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530937 return ret;
938}
939
940/*
941 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
942 * @soc: SoC handle
943 * @ast_entry: AST entry of the node
944 *
945 * This function gets the pdev_id from the ast entry.
946 *
947 * Return: (uint8_t) pdev_id
948 */
949uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
950 struct dp_ast_entry *ast_entry)
951{
952 return ast_entry->pdev_id;
953}
954
955/*
956 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
957 * @soc: SoC handle
958 * @ast_entry: AST entry of the node
959 *
960 * This function gets the next hop from the ast entry.
961 *
962 * Return: (uint8_t) next_hop
963 */
964uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
965 struct dp_ast_entry *ast_entry)
966{
967 return ast_entry->next_hop;
968}
969
/*
 * dp_peer_ast_set_type() - set the entry type in the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 * @type: new cdp_txrx_ast_entry_type value to record
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
 */
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
	ast_entry->type = type;
}
985
Manjunathappa Prakashc850ec62017-11-13 16:55:50 -0800986#else
/* Stub variant compiled when the feature guarded by the preceding #if is
 * disabled (feature macro not visible in this chunk). Performs no work and
 * returns 1 unconditionally.
 */
int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		uint32_t flags)
{
	return 1;
}
993
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530994void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
Manjunathappa Prakashc850ec62017-11-13 16:55:50 -0800995{
996}
997
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530998int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
999 struct dp_ast_entry *ast_entry, uint32_t flags)
1000{
1001 return 1;
1002}
Manjunathappa Prakashc850ec62017-11-13 16:55:50 -08001003
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +05301004struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
1005 uint8_t *ast_mac_addr)
1006{
1007 return NULL;
1008}
1009
/* Stub: per-pdev AST lookup always misses when the feature is compiled out. */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	return NULL;
}
1016
/* Stub: nothing to allocate; report success (0) so attach paths proceed. */
static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	return 0;
}
1021
/* Stub: peer-map AST bookkeeping is a no-op when the feature is compiled
 * out; all parameters are ignored.
 */
static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id, uint16_t ast_hash)
{
	return;
}
1028
/* Stub: nothing was allocated by the attach stub, so nothing to free. */
static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
}
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05301032
/* Stub: AST type is not tracked when the feature is compiled out. */
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
}
1038
/* Stub: returns the invalid-pdev sentinel 0xff when AST is compiled out. */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
1044
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05301045uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
1046 struct dp_ast_entry *ast_entry)
1047{
1048 return 0xff;
1049}
Amir Patelcb990262019-05-28 15:12:48 +05301050
/*
 * NOTE(review): this definition duplicates the dp_peer_update_ast() stub
 * that already appears earlier in this same #else region. Two external
 * definitions of the same function in one translation unit are a
 * redefinition error at compile time — one copy should be deleted.
 */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		struct dp_ast_entry *ast_entry, uint32_t flags)
{
	return 1;
}
1056
Manjunathappa Prakashc850ec62017-11-13 16:55:50 -08001057#endif
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301058
Kiran Venkatappaed35f442018-07-19 22:22:29 +05301059void dp_peer_ast_send_wds_del(struct dp_soc *soc,
1060 struct dp_ast_entry *ast_entry)
1061{
1062 struct dp_peer *peer = ast_entry->peer;
1063 struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
1064
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301065 if (ast_entry->delete_in_progress)
1066 return;
1067
Chaithanya Garrepalli9ff4c542019-01-07 23:03:09 +05301068 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
1069 "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
1070 __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
1071 peer->vdev->vdev_id, ast_entry->mac_addr.raw,
1072 ast_entry->next_hop, ast_entry->peer->mac_addr.raw);
1073
Chaithanya Garrepalli267ae0e2019-02-19 23:45:12 +05301074 if (ast_entry->next_hop) {
Kiran Venkatappaed35f442018-07-19 22:22:29 +05301075 cdp_soc->ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
Chaithanya Garrepalli267ae0e2019-02-19 23:45:12 +05301076 ast_entry->mac_addr.raw,
1077 ast_entry->type);
1078 }
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301079
Pavankumar Nandeshwar1ab908e2019-01-24 12:53:13 +05301080 /* Remove SELF and STATIC entries in teardown itself */
1081 if (!ast_entry->next_hop) {
1082 TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1083 peer->self_ast_entry = NULL;
1084 ast_entry->peer = NULL;
1085 }
1086
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301087 ast_entry->delete_in_progress = true;
Kiran Venkatappaed35f442018-07-19 22:22:29 +05301088}
1089
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301090static void dp_peer_ast_free_entry(struct dp_soc *soc,
1091 struct dp_ast_entry *ast_entry)
Kiran Venkatappaed35f442018-07-19 22:22:29 +05301092{
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301093 struct dp_peer *peer = ast_entry->peer;
1094 void *cookie = NULL;
1095 txrx_ast_free_cb cb = NULL;
Kiran Venkatappaed35f442018-07-19 22:22:29 +05301096
Chaithanya Garrepallie10f87b2018-10-18 00:14:11 +05301097 /*
1098 * release the reference only if it is mapped
1099 * to ast_table
1100 */
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301101
1102 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepallie10f87b2018-10-18 00:14:11 +05301103 if (ast_entry->is_mapped)
1104 soc->ast_table[ast_entry->ast_idx] = NULL;
Tallapragada Kalyan887fb5d2018-10-24 18:27:58 +05301105
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301106 TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
Kiran Venkatappaed35f442018-07-19 22:22:29 +05301107 DP_STATS_INC(soc, ast.deleted, 1);
1108 dp_peer_ast_hash_remove(soc, ast_entry);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301109
1110 cb = ast_entry->callback;
1111 cookie = ast_entry->cookie;
1112 ast_entry->callback = NULL;
1113 ast_entry->cookie = NULL;
1114
1115 if (ast_entry == peer->self_ast_entry)
1116 peer->self_ast_entry = NULL;
1117
1118 qdf_spin_unlock_bh(&soc->ast_lock);
1119
1120 if (cb) {
1121 cb(soc->ctrl_psoc,
1122 soc,
1123 cookie,
1124 CDP_TXRX_AST_DELETED);
1125 }
Kiran Venkatappaed35f442018-07-19 22:22:29 +05301126 qdf_mem_free(ast_entry);
Tallapragada Kalyana7023622018-12-03 19:29:52 +05301127 soc->num_ast_entries--;
Kiran Venkatappaed35f442018-07-19 22:22:29 +05301128}
Kiran Venkatappaed35f442018-07-19 22:22:29 +05301129
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05301130struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001131 uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001132{
1133 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
1134 unsigned index;
1135 struct dp_peer *peer;
1136
1137 if (mac_addr_is_aligned) {
1138 mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
1139 } else {
1140 qdf_mem_copy(
1141 &local_mac_addr_aligned.raw[0],
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -08001142 peer_mac_addr, QDF_MAC_ADDR_SIZE);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001143 mac_addr = &local_mac_addr_aligned;
1144 }
1145 index = dp_peer_find_hash_index(soc, mac_addr);
1146 qdf_spin_lock_bh(&soc->peer_ref_mutex);
1147 TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
1148#if ATH_SUPPORT_WRAP
1149 /* ProxySTA may have multiple BSS peer with same MAC address,
1150 * modified find will take care of finding the correct BSS peer.
1151 */
1152 if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05301153 ((peer->vdev->vdev_id == vdev_id) ||
1154 (vdev_id == DP_VDEV_ALL))) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001155#else
1156 if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
1157#endif
1158 /* found it - increment the ref count before releasing
1159 * the lock
1160 */
1161 qdf_atomic_inc(&peer->ref_cnt);
1162 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1163 return peer;
1164 }
1165 }
1166 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
1167 return NULL; /* failure */
1168}
1169
1170void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1171{
1172 unsigned index;
1173 struct dp_peer *tmppeer = NULL;
1174 int found = 0;
1175
1176 index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1177 /* Check if tail is not empty before delete*/
1178 QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1179 /*
1180 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
1181 * by the caller.
1182 * The caller needs to hold the lock from the time the peer object's
1183 * reference count is decremented and tested up through the time the
1184 * reference to the peer object is removed from the hash table, by
1185 * this function.
1186 * Holding the lock only while removing the peer object reference
1187 * from the hash table keeps the hash table consistent, but does not
1188 * protect against a new HL tx context starting to use the peer object
1189 * if it looks up the peer object from its MAC address just after the
1190 * peer ref count is decremented to zero, but just before the peer
1191 * object reference is removed from the hash table.
1192 */
1193 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1194 if (tmppeer == peer) {
1195 found = 1;
1196 break;
1197 }
1198 }
1199 QDF_ASSERT(found);
1200 TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1201}
1202
1203void dp_peer_find_hash_erase(struct dp_soc *soc)
1204{
1205 int i;
1206
1207 /*
1208 * Not really necessary to take peer_ref_mutex lock - by this point,
1209 * it's known that the soc is no longer in use.
1210 */
1211 for (i = 0; i <= soc->peer_hash.mask; i++) {
1212 if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
1213 struct dp_peer *peer, *peer_next;
1214
1215 /*
1216 * TAILQ_FOREACH_SAFE must be used here to avoid any
1217 * memory access violation after peer is freed
1218 */
1219 TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
1220 hash_list_elem, peer_next) {
1221 /*
1222 * Don't remove the peer from the hash table -
1223 * that would modify the list we are currently
1224 * traversing, and it's not necessary anyway.
1225 */
1226 /*
1227 * Artificially adjust the peer's ref count to
1228 * 1, so it will get deleted by
1229 * dp_peer_unref_delete.
1230 */
1231 /* set to zero */
1232 qdf_atomic_init(&peer->ref_cnt);
1233 /* incr to one */
1234 qdf_atomic_inc(&peer->ref_cnt);
1235 dp_peer_unref_delete(peer);
1236 }
1237 }
1238 }
1239}
1240
Tallapragada Kalyanc7413082019-03-07 21:22:10 +05301241static void dp_peer_ast_table_detach(struct dp_soc *soc)
1242{
phadimanb1007502019-04-03 15:21:53 +05301243 if (soc->ast_table) {
1244 qdf_mem_free(soc->ast_table);
1245 soc->ast_table = NULL;
1246 }
Tallapragada Kalyanc7413082019-03-07 21:22:10 +05301247}
1248
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001249static void dp_peer_find_map_detach(struct dp_soc *soc)
1250{
phadimanb1007502019-04-03 15:21:53 +05301251 if (soc->peer_id_to_obj_map) {
1252 qdf_mem_free(soc->peer_id_to_obj_map);
1253 soc->peer_id_to_obj_map = NULL;
1254 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001255}
1256
/*
 * dp_peer_find_attach() - allocate all peer/AST lookup structures
 * @soc: SoC handle
 *
 * Attaches, in order: the peer-id map, the peer hash, the AST index
 * table, and the AST hash. On any failure every structure attached so
 * far is torn down again (reverse order) before reporting the error.
 *
 * Return: 0 on success, 1 on failure
 */
int dp_peer_find_attach(struct dp_soc *soc)
{
	if (dp_peer_find_map_attach(soc))
		goto fail;

	if (dp_peer_find_hash_attach(soc))
		goto fail_map;

	if (dp_peer_ast_table_attach(soc))
		goto fail_hash;

	if (dp_peer_ast_hash_attach(soc))
		goto fail_ast_table;

	return 0; /* success */

fail_ast_table:
	dp_peer_ast_table_detach(soc);
fail_hash:
	dp_peer_find_hash_detach(soc);
fail_map:
	dp_peer_find_map_detach(soc);
fail:
	return 1;
}
1282
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05301283void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07001284 union hal_reo_status *reo_status)
1285{
1286 struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
1287 struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
1288
1289 if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
Venkata Sharath Chandra Manchalac61826c2019-05-14 22:24:25 -07001290 DP_PRINT_STATS("REO stats failure %d for TID %d\n",
1291 queue_status->header.status, rx_tid->tid);
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07001292 return;
1293 }
1294
Venkata Sharath Chandra Manchalac61826c2019-05-14 22:24:25 -07001295 DP_PRINT_STATS("REO queue stats (TID: %d):\n"
1296 "ssn: %d\n"
1297 "curr_idx : %d\n"
1298 "pn_31_0 : %08x\n"
1299 "pn_63_32 : %08x\n"
1300 "pn_95_64 : %08x\n"
1301 "pn_127_96 : %08x\n"
1302 "last_rx_enq_tstamp : %08x\n"
1303 "last_rx_deq_tstamp : %08x\n"
1304 "rx_bitmap_31_0 : %08x\n"
1305 "rx_bitmap_63_32 : %08x\n"
1306 "rx_bitmap_95_64 : %08x\n"
1307 "rx_bitmap_127_96 : %08x\n"
1308 "rx_bitmap_159_128 : %08x\n"
1309 "rx_bitmap_191_160 : %08x\n"
1310 "rx_bitmap_223_192 : %08x\n"
1311 "rx_bitmap_255_224 : %08x\n",
1312 rx_tid->tid,
1313 queue_status->ssn, queue_status->curr_idx,
1314 queue_status->pn_31_0, queue_status->pn_63_32,
1315 queue_status->pn_95_64, queue_status->pn_127_96,
1316 queue_status->last_rx_enq_tstamp,
1317 queue_status->last_rx_deq_tstamp,
1318 queue_status->rx_bitmap_31_0,
1319 queue_status->rx_bitmap_63_32,
1320 queue_status->rx_bitmap_95_64,
1321 queue_status->rx_bitmap_127_96,
1322 queue_status->rx_bitmap_159_128,
1323 queue_status->rx_bitmap_191_160,
1324 queue_status->rx_bitmap_223_192,
1325 queue_status->rx_bitmap_255_224);
Karunakar Dasineni3da08112017-06-15 14:42:39 -07001326
Venkata Sharath Chandra Manchalac61826c2019-05-14 22:24:25 -07001327 DP_PRINT_STATS(
1328 "curr_mpdu_cnt : %d\n"
1329 "curr_msdu_cnt : %d\n"
1330 "fwd_timeout_cnt : %d\n"
1331 "fwd_bar_cnt : %d\n"
1332 "dup_cnt : %d\n"
1333 "frms_in_order_cnt : %d\n"
1334 "bar_rcvd_cnt : %d\n"
1335 "mpdu_frms_cnt : %d\n"
1336 "msdu_frms_cnt : %d\n"
1337 "total_byte_cnt : %d\n"
1338 "late_recv_mpdu_cnt : %d\n"
1339 "win_jump_2k : %d\n"
1340 "hole_cnt : %d\n",
1341 queue_status->curr_mpdu_cnt,
1342 queue_status->curr_msdu_cnt,
1343 queue_status->fwd_timeout_cnt,
1344 queue_status->fwd_bar_cnt,
1345 queue_status->dup_cnt,
1346 queue_status->frms_in_order_cnt,
1347 queue_status->bar_rcvd_cnt,
1348 queue_status->mpdu_frms_cnt,
1349 queue_status->msdu_frms_cnt,
1350 queue_status->total_cnt,
1351 queue_status->late_recv_mpdu_cnt,
1352 queue_status->win_jump_2k,
1353 queue_status->hole_cnt);
sumedh baikadye3947bd2017-11-29 19:19:25 -08001354
sumedh baikadydf4a57c2018-04-08 22:19:22 -07001355 DP_PRINT_STATS("Addba Req : %d\n"
1356 "Addba Resp : %d\n"
1357 "Addba Resp success : %d\n"
1358 "Addba Resp failed : %d\n"
1359 "Delba Req received : %d\n"
1360 "Delba Tx success : %d\n"
1361 "Delba Tx Fail : %d\n"
1362 "BA window size : %d\n"
1363 "Pn size : %d\n",
1364 rx_tid->num_of_addba_req,
1365 rx_tid->num_of_addba_resp,
1366 rx_tid->num_addba_rsp_success,
1367 rx_tid->num_addba_rsp_failed,
1368 rx_tid->num_of_delba_req,
1369 rx_tid->delba_tx_success_cnt,
1370 rx_tid->delba_tx_fail_cnt,
1371 rx_tid->ba_win_size,
1372 rx_tid->pn_size);
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07001373}
1374
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301375static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301376 uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
1377 uint8_t vdev_id)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001378{
1379 struct dp_peer *peer;
1380
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05301381 QDF_ASSERT(peer_id <= soc->max_peers);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001382 /* check if there's already a peer object with this MAC address */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001383 peer = dp_peer_find_hash_find(soc, peer_mac_addr,
1384 0 /* is aligned */, vdev_id);
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301385 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1386 "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
1387 __func__, peer, peer_id, vdev_id, peer_mac_addr[0],
1388 peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
1389 peer_mac_addr[4], peer_mac_addr[5]);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001390
1391 if (peer) {
1392 /* peer's ref count was already incremented by
1393 * peer_find_hash_find
1394 */
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301395 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08001396 "%s: ref_cnt: %d", __func__,
1397 qdf_atomic_read(&peer->ref_cnt));
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301398 if (!soc->peer_id_to_obj_map[peer_id])
1399 soc->peer_id_to_obj_map[peer_id] = peer;
1400 else {
1401 /* Peer map event came for peer_id which
1402 * is already mapped, this is not expected
1403 */
1404 QDF_ASSERT(0);
1405 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001406
1407 if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
1408 /* TBDXXX: assert for now */
1409 QDF_ASSERT(0);
1410 }
1411
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301412 return peer;
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301413 }
1414
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301415 return NULL;
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301416}
1417
1418/**
1419 * dp_rx_peer_map_handler() - handle peer map event from firmware
1420 * @soc_handle - genereic soc handle
1421 * @peeri_id - peer_id from firmware
1422 * @hw_peer_id - ast index for this peer
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301423 * @vdev_id - vdev ID
1424 * @peer_mac_addr - mac address of the peer
1425 * @ast_hash - ast hash value
1426 * @is_wds - flag to indicate peer map event for WDS ast entry
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301427 *
1428 * associate the peer_id that firmware provided with peer entry
1429 * and update the ast table in the host with the hw_peer_id.
1430 *
1431 * Return: none
1432 */
1433
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001434void
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301435dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id,
1436 uint16_t hw_peer_id, uint8_t vdev_id,
1437 uint8_t *peer_mac_addr, uint16_t ast_hash,
1438 uint8_t is_wds)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001439{
1440 struct dp_soc *soc = (struct dp_soc *)soc_handle;
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301441 struct dp_peer *peer = NULL;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301442 enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301443
Mohit Khanna02553142019-04-11 17:49:27 -07001444 dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d",
1445 soc, peer_id, hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301446 peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
1447 peer_mac_addr[5], vdev_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001448
Tallapragada Kalyana7023622018-12-03 19:29:52 +05301449 if ((hw_peer_id < 0) ||
1450 (hw_peer_id >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301451 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301452 "invalid hw_peer_id: %d", hw_peer_id);
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +05301453 qdf_assert_always(0);
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301454 }
1455
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301456 /* Peer map event for WDS ast entry get the peer from
1457 * obj map
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301458 */
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301459 if (is_wds) {
1460 peer = soc->peer_id_to_obj_map[peer_id];
1461 } else {
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301462 peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301463 hw_peer_id, vdev_id);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05301464
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301465 if (peer) {
Chaitanya Kiran Godavarthi70aeda12019-02-01 17:32:48 +05301466 if (wlan_op_mode_sta == peer->vdev->opmode &&
1467 qdf_mem_cmp(peer->mac_addr.raw,
1468 peer->vdev->mac_addr.raw,
1469 QDF_MAC_ADDR_SIZE) != 0) {
1470 dp_info("STA vdev bss_peer!!!!");
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301471 peer->bss_peer = 1;
1472 peer->vdev->vap_bss_peer = peer;
1473 }
1474
1475 if (peer->vdev->opmode == wlan_op_mode_sta)
1476 peer->vdev->bss_ast_hash = ast_hash;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301477
1478 /* Add ast entry incase self ast entry is
1479 * deleted due to DP CP sync issue
1480 *
1481 * self_ast_entry is modified in peer create
1482 * and peer unmap path which cannot run in
1483 * parllel with peer map, no lock need before
1484 * referring it
1485 */
1486 if (!peer->self_ast_entry) {
Mohit Khanna02553142019-04-11 17:49:27 -07001487 dp_info("Add self ast from map %pM",
1488 peer_mac_addr);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301489 dp_peer_add_ast(soc, peer,
1490 peer_mac_addr,
1491 type, 0);
1492 }
1493
sumedh baikady68450ab2018-03-23 18:36:29 -07001494 }
Anish Nataraj0dae6762018-03-02 22:31:45 +05301495 }
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301496 dp_peer_map_ast(soc, peer, peer_mac_addr,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301497 hw_peer_id, vdev_id, ast_hash);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001498}
1499
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301500/**
1501 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1502 * @soc_handle - genereic soc handle
1503 * @peeri_id - peer_id from firmware
1504 * @vdev_id - vdev ID
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301505 * @mac_addr - mac address of the peer or wds entry
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301506 * @is_wds - flag to indicate peer map event for WDS ast entry
1507 *
1508 * Return: none
1509 */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001510void
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301511dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id,
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301512 uint8_t vdev_id, uint8_t *mac_addr,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301513 uint8_t is_wds)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001514{
1515 struct dp_peer *peer;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301516 struct dp_ast_entry *ast_entry;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001517 struct dp_soc *soc = (struct dp_soc *)soc_handle;
1518 uint8_t i;
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301519
1520 peer = __dp_peer_find_by_id(soc, peer_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001521
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001522 /*
1523 * Currently peer IDs are assigned for vdevs as well as peers.
1524 * If the peer ID is for a vdev, then the peer pointer stored
1525 * in peer_id_to_obj_map will be NULL.
1526 */
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301527 if (!peer) {
Mohit Khanna02553142019-04-11 17:49:27 -07001528 dp_err("Received unmap event for invalid peer_id %u", peer_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001529 return;
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301530 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001531
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301532 /* If V2 Peer map messages are enabled AST entry has to be freed here
1533 */
Pavankumar Nandeshwar1ab908e2019-01-24 12:53:13 +05301534 if (soc->is_peer_map_unmap_v2 && is_wds) {
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301535
1536 qdf_spin_lock_bh(&soc->ast_lock);
1537 ast_entry = dp_peer_ast_list_find(soc, peer,
1538 mac_addr);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301539 qdf_spin_unlock_bh(&soc->ast_lock);
1540
Pavankumar Nandeshwar1ab908e2019-01-24 12:53:13 +05301541 if (ast_entry) {
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301542 dp_peer_ast_free_entry(soc, ast_entry);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301543 return;
Pavankumar Nandeshwar1ab908e2019-01-24 12:53:13 +05301544 }
1545
Mohit Khanna02553142019-04-11 17:49:27 -07001546 dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
1547 peer, peer->peer_ids[0],
1548 peer->mac_addr.raw, mac_addr, vdev_id,
1549 is_wds);
Pavankumar Nandeshwar1ab908e2019-01-24 12:53:13 +05301550
1551 return;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301552 }
1553
Mohit Khanna02553142019-04-11 17:49:27 -07001554 dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301555 soc, peer_id, peer);
1556
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001557 soc->peer_id_to_obj_map[peer_id] = NULL;
1558 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1559 if (peer->peer_ids[i] == peer_id) {
1560 peer->peer_ids[i] = HTT_INVALID_PEER;
1561 break;
1562 }
1563 }
1564
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05301565 if (soc->cdp_soc.ol_ops->peer_unmap_event) {
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05301566 soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
Subhranil Choudhury9bcfecf2019-02-28 13:41:45 +05301567 peer_id, vdev_id);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05301568 }
1569
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001570 /*
1571 * Remove a reference to the peer.
1572 * If there are no more references, delete the peer object.
1573 */
1574 dp_peer_unref_delete(peer);
1575}
1576
/*
 * dp_peer_find_detach() - Tear down the SOC-level peer lookup structures:
 * peer-id map, peer hash table, AST hash table and AST table.
 * @soc: SOC handle
 */
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_ast_table_detach(soc);
}
1585
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001586static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1587 union hal_reo_status *reo_status)
1588{
1589 struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001590
Karunakar Dasineni31b98d42018-02-27 23:05:08 -08001591 if ((reo_status->rx_queue_status.header.status !=
1592 HAL_REO_CMD_SUCCESS) &&
1593 (reo_status->rx_queue_status.header.status !=
1594 HAL_REO_CMD_DRAIN)) {
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001595 /* Should not happen normally. Just print error for now */
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301596 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1597 "%s: Rx tid HW desc update failed(%d): tid %d",
1598 __func__,
1599 reo_status->rx_queue_status.header.status,
1600 rx_tid->tid);
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001601 }
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001602}
1603
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001604/*
Leo Chang5ea93a42016-11-03 12:39:49 -07001605 * dp_find_peer_by_addr - find peer instance by mac address
1606 * @dev: physical device instance
1607 * @peer_mac_addr: peer mac address
1608 * @local_id: local id for the peer
1609 *
1610 * Return: peer instance pointer
1611 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001612void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
Leo Chang5ea93a42016-11-03 12:39:49 -07001613 uint8_t *local_id)
1614{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001615 struct dp_pdev *pdev = (struct dp_pdev *)dev;
Leo Chang5ea93a42016-11-03 12:39:49 -07001616 struct dp_peer *peer;
1617
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05301618 peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05301619
Leo Chang5ea93a42016-11-03 12:39:49 -07001620 if (!peer)
1621 return NULL;
1622
1623 /* Multiple peer ids? How can know peer id? */
1624 *local_id = peer->local_id;
Krunal Sonic96a1162019-02-21 11:33:26 -08001625 dp_verbose_debug("peer %pK id %d", peer, *local_id);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08001626
1627 /* ref_cnt is incremented inside dp_peer_find_hash_find().
1628 * Decrement it here.
1629 */
Chaithanya Garrepalli7c8cf122018-09-07 19:23:52 +05301630 dp_peer_unref_delete(peer);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08001631
Leo Chang5ea93a42016-11-03 12:39:49 -07001632 return peer;
1633}
1634
1635/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001636 * dp_rx_tid_update_wifi3() – Update receive TID state
1637 * @peer: Datapath peer handle
1638 * @tid: TID
1639 * @ba_window_size: BlockAck window size
1640 * @start_seq: Starting sequence number
1641 *
1642 * Return: 0 on success, error code on failure
1643 */
Jeff Johnson416168b2017-01-06 09:42:43 -08001644static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1645 ba_window_size, uint32_t start_seq)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001646{
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001647 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1648 struct dp_soc *soc = peer->vdev->pdev->soc;
1649 struct hal_reo_cmd_params params;
1650
1651 qdf_mem_zero(&params, sizeof(params));
1652
1653 params.std.need_status = 1;
1654 params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1655 params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1656 params.u.upd_queue_params.update_ba_window_size = 1;
1657 params.u.upd_queue_params.ba_window_size = ba_window_size;
1658
1659 if (start_seq < IEEE80211_SEQ_MAX) {
1660 params.u.upd_queue_params.update_ssn = 1;
1661 params.u.upd_queue_params.ssn = start_seq;
1662 }
1663
Pramod Simhab17d0672017-03-06 17:20:13 -08001664 dp_set_ssn_valid_flag(&params, 0);
Sravan Kumar Kairam4f6b8f52019-03-18 14:53:06 +05301665 dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
1666 dp_rx_tid_update_cb, rx_tid);
Sumedh Baikady1c61e062018-02-12 22:25:47 -08001667
1668 rx_tid->ba_win_size = ba_window_size;
Gyanranjan Hazarika7f9c0502018-07-25 23:26:16 -07001669
Rakesh Pillai9498cd72019-04-05 18:43:47 +05301670 if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
1671 soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1672 peer->vdev->pdev->ctrl_pdev,
1673 peer->vdev->vdev_id, peer->mac_addr.raw,
1674 rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
1675 }
Sravan Kumar Kairam4f6b8f52019-03-18 14:53:06 +05301676
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001677 return 0;
1678}
1679
1680/*
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001681 * dp_reo_desc_free() - Callback free reo descriptor memory after
1682 * HW cache flush
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001683 *
1684 * @soc: DP SOC handle
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001685 * @cb_ctxt: Callback context
1686 * @reo_status: REO command status
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001687 */
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001688static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1689 union hal_reo_status *reo_status)
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001690{
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001691 struct reo_desc_list_node *freedesc =
1692 (struct reo_desc_list_node *)cb_ctxt;
1693 struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001694
Karunakar Dasineni31b98d42018-02-27 23:05:08 -08001695 if ((reo_status->fl_cache_status.header.status !=
1696 HAL_REO_CMD_SUCCESS) &&
1697 (reo_status->fl_cache_status.header.status !=
1698 HAL_REO_CMD_DRAIN)) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301699 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1700 "%s: Rx tid HW desc flush failed(%d): tid %d",
1701 __func__,
1702 reo_status->rx_queue_status.header.status,
1703 freedesc->rx_tid.tid);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001704 }
chenguo8df4d462018-12-19 16:33:14 +08001705 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1706 "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1707 (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001708 qdf_mem_unmap_nbytes_single(soc->osdev,
1709 rx_tid->hw_qdesc_paddr,
1710 QDF_DMA_BIDIRECTIONAL,
1711 rx_tid->hw_qdesc_alloc_size);
1712 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1713 qdf_mem_free(freedesc);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001714}
1715
Basamma Yakkanahallib85768e2019-04-27 05:24:00 +05301716#if defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86)
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001717/* Hawkeye emulation requires bus address to be >= 0x50000000 */
1718static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1719{
1720 if (dma_addr < 0x50000000)
1721 return QDF_STATUS_E_FAILURE;
1722 else
1723 return QDF_STATUS_SUCCESS;
1724}
1725#else
1726static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1727{
1728 return QDF_STATUS_SUCCESS;
1729}
1730#endif
1731
1732
/*
 * dp_rx_tid_setup_wifi3() – Setup receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Allocates and maps the REO HW queue descriptor for the TID (retrying
 * if the bus address fails dp_reo_desc_addr_chk()), programs it via
 * hal_reo_qdesc_setup(), and notifies the control plane.  If a HW
 * descriptor already exists, delegates to dp_rx_tid_update_wifi3().
 *
 * Return: 0 on success, error code on failure
 */
int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
	uint32_t ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint32_t hw_qdesc_size;
	uint32_t hw_qdesc_align;
	int hal_pn_type;
	void *hw_qdesc_vaddr;
	uint32_t alloc_tries = 0;
	int err = QDF_STATUS_SUCCESS;

	/* Refuse setup for peers being deleted or not yet routed */
	if (peer->delete_in_progress ||
	    !qdf_atomic_read(&peer->is_default_route_set))
		return QDF_STATUS_E_FAILURE;

	rx_tid->ba_win_size = ba_window_size;
	/* Queue descriptor already allocated: only an update is needed */
	if (rx_tid->hw_qdesc_vaddr_unaligned)
		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
			start_seq);
	/* Fresh setup: reset per-TID BA/DELBA bookkeeping */
	rx_tid->delba_tx_status = 0;
	rx_tid->ppdu_id_2k = 0;
	rx_tid->num_of_addba_req = 0;
	rx_tid->num_of_delba_req = 0;
	rx_tid->num_of_addba_resp = 0;
	rx_tid->num_addba_rsp_failed = 0;
	rx_tid->num_addba_rsp_success = 0;
	rx_tid->delba_tx_success_cnt = 0;
	rx_tid->delba_tx_fail_cnt = 0;
	rx_tid->statuscode = 0;

	/* TODO: Allocating HW queue descriptors based on max BA window size
	 * for all QOS TIDs so that same descriptor can be used later when
	 * ADDBA request is recevied. This should be changed to allocate HW
	 * queue descriptors based on BA window size being negotiated (0 for
	 * non BA cases), and reallocate when BA window size changes and also
	 * send WMI message to FW to change the REO queue descriptor in Rx
	 * peer entry as part of dp_rx_tid_update.
	 */
	if (tid != DP_NON_QOS_TID)
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			HAL_RX_MAX_BA_WINDOW, tid);
	else
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			ba_window_size, tid);

	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
	/* To avoid unnecessary extra allocation for alignment, try allocating
	 * exact size and see if we already have aligned address.
	 */
	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;

try_desc_alloc:
	rx_tid->hw_qdesc_vaddr_unaligned =
		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);

	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc alloc failed: tid %d",
			__func__, tid);
		return QDF_STATUS_E_NOMEM;
	}

	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
		hw_qdesc_align) {
		/* Address allocated above is not alinged. Allocate extra
		 * memory for alignment
		 */
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned =
			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
					hw_qdesc_align - 1);

		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Rx tid HW desc alloc failed: tid %d",
				__func__, tid);
			return QDF_STATUS_E_NOMEM;
		}

		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
			rx_tid->hw_qdesc_vaddr_unaligned,
			hw_qdesc_align);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Total Size %d Aligned Addr %pK",
			__func__, rx_tid->hw_qdesc_alloc_size,
			hw_qdesc_vaddr);

	} else {
		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
	}

	/* TODO: Ensure that sec_type is set before ADDBA is received.
	 * Currently this is set based on htt indication
	 * HTT_T2H_MSG_TYPE_SEC_IND from target
	 */
	switch (peer->security[dp_sec_ucast].sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		hal_pn_type = HAL_PN_WPA;
		break;
	case cdp_sec_type_wapi:
		if (vdev->opmode == wlan_op_mode_ap)
			hal_pn_type = HAL_PN_WAPI_EVEN;
		else
			hal_pn_type = HAL_PN_WAPI_UNEVEN;
		break;
	default:
		hal_pn_type = HAL_PN_NONE;
		break;
	}

	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);

	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
		&(rx_tid->hw_qdesc_paddr));

	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
			QDF_STATUS_SUCCESS) {
		/* Retry (bounded) until a usable bus address is obtained.
		 * NOTE(review): the buffer is freed here without a matching
		 * qdf_mem_unmap_nbytes_single() for the mapping created
		 * above — confirm this is intentional on this platform.
		 */
		if (alloc_tries++ < 10) {
			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
			goto try_desc_alloc;
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Rx tid HW desc alloc failed (lowmem): tid %d",
				__func__, tid);
			err = QDF_STATUS_E_NOMEM;
			goto error;
		}
	}

	/* Tell FW/control plane about the new reorder queue; treat a
	 * non-zero return as failure and roll back.
	 */
	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
		    vdev->pdev->ctrl_pdev, peer->vdev->vdev_id,
		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
		    1, ba_window_size)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Failed to send reo queue setup to FW - tid %d\n",
				__func__, tid);
			err = QDF_STATUS_E_FAILURE;
			goto error;
		}
	}
	return 0;
error:
	/* Roll back: unmap (only if the address check passed, i.e. the
	 * mapping is the one we will keep no record of) and free.
	 */
	if (rx_tid->hw_qdesc_vaddr_unaligned) {
		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
		    QDF_STATUS_SUCCESS)
			qdf_mem_unmap_nbytes_single(
				soc->osdev,
				rx_tid->hw_qdesc_paddr,
				QDF_DMA_BIDIRECTIONAL,
				rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
	}
	return err;
}
1908
/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (ie., setting valid=0)
 *
 * Queues the freed descriptor on soc->reo_desc_freelist and then flushes
 * (base + extension descriptors separately) any list entries that are
 * old enough or when the list has grown past REO_DESC_FREELIST_SIZE.
 * Actual memory release happens in dp_reo_desc_free().
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	uint32_t list_size;
	struct reo_desc_list_node *desc;
	unsigned long curr_ts = qdf_get_system_timestamp();
	uint32_t desc_size, tot_desc_size;
	struct hal_reo_cmd_params params;

	/* On drain, skip the freelist deferral and free immediately */
	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
		qdf_mem_zero(reo_status, sizeof(*reo_status));
		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
		return;
	} else if (reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_SUCCESS) {
		/* Should not happen normally. Just print error for now */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc deletion failed(%d): tid %d",
			__func__,
			reo_status->rx_queue_status.header.status,
			freedesc->rx_tid.tid);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		"%s: rx_tid: %d status: %d", __func__,
		freedesc->rx_tid.tid,
		reo_status->rx_queue_status.header.status);

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	freedesc->free_ts = curr_ts;
	qdf_list_insert_back_size(&soc->reo_desc_freelist,
		(qdf_list_node_t *)freedesc, &list_size);

	/* Age out entries: either the list is over its size budget or the
	 * front entry has been deferred longer than REO_DESC_FREE_DEFER_MS.
	 */
	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
		((list_size >= REO_DESC_FREELIST_SIZE) ||
		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
		struct dp_rx_tid *rx_tid;

		qdf_list_remove_front(&soc->reo_desc_freelist,
				(qdf_list_node_t **)&desc);
		list_size--;
		rx_tid = &desc->rx_tid;

		/* Flush and invalidate REO descriptor from HW cache: Base and
		 * extension descriptors should be flushed separately */
		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
		/* Get base descriptor size by passing non-qos TID */
		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
						   DP_NON_QOS_TID);

		/* Flush reo extension descriptors */
		while ((tot_desc_size -= desc_size) > 0) {
			qdf_mem_zero(&params, sizeof(params));
			params.std.addr_lo =
				((uint64_t)(rx_tid->hw_qdesc_paddr) +
				tot_desc_size) & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			/* No callback: extension flush failures are only
			 * logged; the base-descriptor flush below carries
			 * the free callback.
			 */
			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
							CMD_FLUSH_CACHE,
							&params,
							NULL,
							NULL)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"%s: fail to send CMD_CACHE_FLUSH:"
					"tid %d desc %pK", __func__,
					rx_tid->tid,
					(void *)(rx_tid->hw_qdesc_paddr));
			}
		}

		/* Flush base descriptor */
		qdf_mem_zero(&params, sizeof(params));
		params.std.need_status = 1;
		params.std.addr_lo =
			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
							CMD_FLUSH_CACHE,
							&params,
							dp_reo_desc_free,
							(void *)desc)) {
			union hal_reo_status reo_status;
			/*
			 * If dp_reo_send_cmd return failure, related TID queue desc
			 * should be unmapped. Also locally reo_desc, together with
			 * TID queue desc also need to be freed accordingly.
			 *
			 * Here invoke desc_free function directly to do clean up.
			 */
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: fail to send REO cmd to flush cache: tid %d",
				__func__, rx_tid->tid);
			qdf_mem_zero(&reo_status, sizeof(reo_status));
			reo_status.fl_cache_status.header.status = 0;
			dp_reo_desc_free(soc, (void *)desc, &reo_status);
		}
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
}
2024
/*
 * dp_rx_tid_delete_wifi3() – Delete receive TID queue
 * @peer: Datapath peer handle
 * @tid: TID
 *
 * Marks the TID's REO queue invalid (vld=0) via a REO command; the
 * descriptor memory itself is released later by dp_rx_tid_delete_cb().
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
{
	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	/* freedesc carries a snapshot of rx_tid (and thus ownership of the
	 * HW qdesc memory) to dp_rx_tid_delete_cb.
	 */
	struct reo_desc_list_node *freedesc =
		qdf_mem_malloc(sizeof(*freedesc));

	if (!freedesc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: malloc failed for freedesc: tid %d",
			__func__, tid);
		return -ENOMEM;
	}

	freedesc->rx_tid = *rx_tid;

	qdf_mem_zero(&params, sizeof(params));

	/* Invalidate the queue so HW stops using this descriptor */
	params.std.need_status = 1;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_vld = 1;
	params.u.upd_queue_params.vld = 0;

	/* NOTE(review): the return value of dp_reo_send_cmd() is ignored;
	 * if the command cannot be queued, freedesc and the HW qdesc would
	 * never be released by the callback — confirm this cannot occur
	 * here or add failure handling.
	 */
	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
		dp_rx_tid_delete_cb, (void *)freedesc);

	/* Detach the descriptor from the live rx_tid; the snapshot above
	 * is now the only reference.
	 */
	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
	rx_tid->hw_qdesc_alloc_size = 0;
	rx_tid->hw_qdesc_paddr = 0;

	return 0;
}
2066
#ifdef DP_LFR
/*
 * dp_peer_setup_remaining_tids() - Pre-setup rx TID queues 1..DP_MAX_TIDS-2
 * for a peer (used by callers to prepare the remaining TIDs, e.g. for LFR).
 * @peer: Datapath peer handle
 */
static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
{
	int tid;

	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"Setting up TID %d for peer %pK peer->local_id %d",
			tid, peer, peer->local_id);
	}
}
#else
/* No-op stub when DP_LFR is disabled.
 * Fix: dropped the stray ';' after the body, which formed an empty
 * file-scope declaration (a constraint violation under -Wpedantic).
 */
static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
#endif
nobeljdebe2b32019-04-23 11:18:47 -07002082
#ifndef WLAN_TX_PKT_CAPTURE_ENH
/*
 * dp_peer_tid_queue_init() – Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 * No-op stub when tx packet capture enhancement is compiled out.
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/*
 * dp_peer_tid_queue_cleanup() – remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 * No-op stub when tx packet capture enhancement is compiled out.
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/*
 * dp_peer_update_80211_hdr() – dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 * No-op stub when tx packet capture enhancement is compiled out.
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif
2113
/*
 * dp_peer_tx_init() – Initialize transmit TID state
 * (original comment said "receive"; this is the tx-side counterpart)
 * @pdev: Datapath pdev
 * @peer: Datapath peer
 *
 */
void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
{
	/* Both calls are no-ops unless WLAN_TX_PKT_CAPTURE_ENH is enabled */
	dp_peer_tid_queue_init(peer);
	dp_peer_update_80211_hdr(peer->vdev, peer);
}
2125
/*
 * dp_peer_tx_cleanup() – Deinitialize transmit TID state
 * (original comment said "receive"; this is the tx-side counterpart)
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
static inline void
dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	/* No-op unless WLAN_TX_PKT_CAPTURE_ENH is enabled */
	dp_peer_tid_queue_cleanup(peer);
}
2137
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002138/*
2139 * dp_peer_rx_init() – Initialize receive TID state
2140 * @pdev: Datapath pdev
2141 * @peer: Datapath peer
2142 *
2143 */
2144void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2145{
2146 int tid;
2147 struct dp_rx_tid *rx_tid;
2148 for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2149 rx_tid = &peer->rx_tid[tid];
2150 rx_tid->array = &rx_tid->base;
2151 rx_tid->base.head = rx_tid->base.tail = NULL;
2152 rx_tid->tid = tid;
2153 rx_tid->defrag_timeout_ms = 0;
2154 rx_tid->ba_win_size = 0;
2155 rx_tid->ba_status = DP_RX_BA_INACTIVE;
2156
2157 rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2158 rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002159 }
2160
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002161 peer->active_ba_session_cnt = 0;
2162 peer->hw_buffer_size = 0;
2163 peer->kill_256_sessions = 0;
2164
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002165 /* Setup default (non-qos) rx tid queue */
2166 dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
Karunakar Dasinenied1de122016-08-02 11:57:59 -07002167
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08002168 /* Setup rx tid queue for TID 0.
2169 * Other queues will be setup on receiving first packet, which will cause
2170 * NULL REO queue error
2171 */
2172 dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2173
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002174 /*
Pramod Simhab17d0672017-03-06 17:20:13 -08002175 * Setup the rest of TID's to handle LFR
2176 */
2177 dp_peer_setup_remaining_tids(peer);
2178
2179 /*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002180 * Set security defaults: no PN check, no security. The target may
2181 * send a HTT SEC_IND message to overwrite these defaults.
2182 */
2183 peer->security[dp_sec_ucast].sec_type =
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05302184 peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002185}
2186
/*
 * dp_peer_rx_cleanup() – Cleanup receive TID state
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 * Flushes defrag state and deletes the REO queue for each TID (under
 * the per-TID lock), then destroys the per-TID locks.
 */
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	int tid;
	uint32_t tid_delete_mask = 0;

	DP_TRACE(INFO_HIGH, FL("Remove tids for peer: %pK"), peer);
	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

		qdf_spin_lock_bh(&rx_tid->tid_lock);
		/* Defrag teardown is skipped for bss peers and STA vdevs */
		if (!peer->bss_peer && peer->vdev->opmode != wlan_op_mode_sta) {
			/* Cleanup defrag related resource */
			dp_rx_defrag_waitlist_remove(peer, tid);
			dp_rx_reorder_flush_frag(peer, tid);
		}

		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
			dp_rx_tid_delete_wifi3(peer, tid);

			tid_delete_mask |= (1 << tid);
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
#ifdef notyet /* See if FW can remove queues as part of peer cleanup */
	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			tid_delete_mask);
	}
#endif
	for (tid = 0; tid < DP_MAX_TIDS; tid++)
		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
}
2226
2227/*
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08002228 * dp_peer_cleanup() – Cleanup peer information
2229 * @vdev: Datapath vdev
2230 * @peer: Datapath peer
2231 *
2232 */
void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	/* Release any Tx-side state held for this peer first */
	dp_peer_tx_cleanup(vdev, peer);

	/* Then tear down the Rx reorder queues and per-TID state */
	dp_peer_rx_cleanup(vdev, peer);
}
2240
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002241/* dp_teardown_256_ba_session() - Teardown sessions using 256
2242 * window size when a request with
2243 * 64 window size is received.
2244 * This is done as a WAR since HW can
2245 * have only one setting per peer (64 or 256).
sumedh baikady61cbe852018-10-09 11:04:34 -07002246 * For HKv2, we use per tid buffersize setting
2247 * for 0 to per_tid_basize_max_tid. For tid
2248 * more than per_tid_basize_max_tid we use HKv1
2249 * method.
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002250 * @peer: Datapath peer
2251 *
2252 * Return: void
2253 */
2254static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
2255{
2256 uint8_t delba_rcode = 0;
2257 int tid;
2258 struct dp_rx_tid *rx_tid = NULL;
2259
sumedh baikady61cbe852018-10-09 11:04:34 -07002260 tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
2261 for (; tid < DP_MAX_TIDS; tid++) {
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002262 rx_tid = &peer->rx_tid[tid];
2263 qdf_spin_lock_bh(&rx_tid->tid_lock);
2264
2265 if (rx_tid->ba_win_size <= 64) {
2266 qdf_spin_unlock_bh(&rx_tid->tid_lock);
2267 continue;
2268 } else {
2269 if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
2270 rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
2271 /* send delba */
2272 if (!rx_tid->delba_tx_status) {
2273 rx_tid->delba_tx_retry++;
2274 rx_tid->delba_tx_status = 1;
2275 rx_tid->delba_rcode =
2276 IEEE80211_REASON_QOS_SETUP_REQUIRED;
2277 delba_rcode = rx_tid->delba_rcode;
2278
2279 qdf_spin_unlock_bh(&rx_tid->tid_lock);
2280 peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
2281 peer->vdev->pdev->ctrl_pdev,
2282 peer->ctrl_peer,
2283 peer->mac_addr.raw,
2284 tid, peer->vdev->ctrl_vdev,
2285 delba_rcode);
2286 } else {
2287 qdf_spin_unlock_bh(&rx_tid->tid_lock);
2288 }
2289 } else {
2290 qdf_spin_unlock_bh(&rx_tid->tid_lock);
2291 }
2292 }
2293 }
2294}
2295
/*
 * dp_addba_resp_tx_completion_wifi3() – Update Rx Tid state on ADDBA
 * response tx completion
 *
 * @peer: Datapath peer handle
 * @tid: TID number
 * @status: tx completion status
 * Return: 0 on success, error code on failure
 */
int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
	uint8_t tid, int status)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	/* Peer may already be gone or under deletion by another context */
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		rx_tid->num_addba_rsp_failed++;
		/* response never made it out: shrink queue back to 1 entry */
		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx Tid- %d addba rsp tx completion failed!",
			  __func__, tid);
		return QDF_STATUS_SUCCESS;
	}

	rx_tid->num_addba_rsp_success++;
	/* the session must have been moved to IN_PROGRESS on the request */
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
			  __func__, tid);
		return QDF_STATUS_E_FAILURE;
	}

	if (!qdf_atomic_read(&peer->is_default_route_set)) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: default route is not set for peer: %pM",
			  __func__, peer->mac_addr.raw);
		return QDF_STATUS_E_FAILURE;
	}

	/* First Session */
	if (peer->active_ba_session_cnt == 0) {
		/* latch the HW buffer size (64 or 256) for the whole peer */
		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
			peer->hw_buffer_size = 256;
		else
			peer->hw_buffer_size = 64;
	}

	rx_tid->ba_status = DP_RX_BA_ACTIVE;

	peer->active_ba_session_cnt++;

	qdf_spin_unlock_bh(&rx_tid->tid_lock);

	/* Kill any session having 256 buffer size
	 * when 64 buffer size request is received.
	 * Also, latch on to 64 as new buffer size.
	 */
	if (peer->kill_256_sessions) {
		dp_teardown_256_ba_sessions(peer);
		peer->kill_256_sessions = 0;
	}
	return QDF_STATUS_SUCCESS;
}
2369
/*
 * dp_addba_responsesetup_wifi3() – Populate ADDBA response parameters
 *
 * @peer: Datapath peer handle
 * @tid: TID number
 * @dialogtoken: output dialogtoken
 * @statuscode: output status code
 * @buffersize: Output BA window size
 * @batimeout: Output BA timeout
 */
void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
	uint8_t *dialogtoken, uint16_t *statuscode,
	uint16_t *buffersize, uint16_t *batimeout)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	/* Peer may already be gone or under deletion by another context */
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		return;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_resp++;
	/* setup ADDBA response parameters */
	*dialogtoken = rx_tid->dialogtoken;
	*statuscode = rx_tid->statuscode;
	*buffersize = rx_tid->ba_win_size;
	/* BA timeout is not tracked per tid; always report 0 (no timeout) */
	*batimeout = 0;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
}
2402
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002403/* dp_check_ba_buffersize() - Check buffer size in request
2404 * and latch onto this size based on
2405 * size used in first active session.
2406 * @peer: Datapath peer
2407 * @tid: Tid
2408 * @buffersize: Block ack window size
2409 *
2410 * Return: void
2411 */
2412static void dp_check_ba_buffersize(struct dp_peer *peer,
2413 uint16_t tid,
2414 uint16_t buffersize)
2415{
2416 struct dp_rx_tid *rx_tid = NULL;
2417
2418 rx_tid = &peer->rx_tid[tid];
sumedh baikady61cbe852018-10-09 11:04:34 -07002419 if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
2420 tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002421 rx_tid->ba_win_size = buffersize;
sumedh baikady61cbe852018-10-09 11:04:34 -07002422 return;
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002423 } else {
sumedh baikady61cbe852018-10-09 11:04:34 -07002424 if (peer->active_ba_session_cnt == 0) {
2425 rx_tid->ba_win_size = buffersize;
2426 } else {
2427 if (peer->hw_buffer_size == 64) {
2428 if (buffersize <= 64)
2429 rx_tid->ba_win_size = buffersize;
2430 else
2431 rx_tid->ba_win_size = peer->hw_buffer_size;
2432 } else if (peer->hw_buffer_size == 256) {
2433 if (buffersize > 64) {
2434 rx_tid->ba_win_size = buffersize;
2435 } else {
2436 rx_tid->ba_win_size = buffersize;
2437 peer->hw_buffer_size = 64;
2438 peer->kill_256_sessions = 1;
2439 }
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002440 }
2441 }
2442 }
2443}
2444
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002445/*
2446 * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
2447 *
2448 * @peer: Datapath peer handle
2449 * @dialogtoken: dialogtoken from ADDBA frame
2450 * @tid: TID number
2451 * @batimeout: BA timeout
2452 * @buffersize: BA window size
2453 * @startseqnum: Start seq. number received in BA sequence control
2454 *
2455 * Return: 0 on success, error code on failure
2456 */
int dp_addba_requestprocess_wifi3(void *peer_handle,
				  uint8_t dialogtoken,
				  uint16_t tid, uint16_t batimeout,
				  uint16_t buffersize,
				  uint16_t startseqnum)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	/* Peer may already be gone or under deletion by another context */
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_req++;
	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
	     rx_tid->hw_qdesc_vaddr_unaligned)) {
		/*
		 * A session is already active for this tid: shrink the REO
		 * queue back to a 1-entry window and tear the old session
		 * down before accepting the new request.
		 */
		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		peer->active_ba_session_cnt--;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Addba recvd for Rx Tid-%d hw qdesc is already setup",
			  __func__, tid);
	}

	/* An ADDBA handshake is already in flight for this tid */
	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		return QDF_STATUS_E_FAILURE;
	}
	/* Pick the BA window to use; may clamp to the latched HW size */
	dp_check_ba_buffersize(peer, tid, buffersize);

	if (dp_rx_tid_setup_wifi3(peer, tid,
				  rx_tid->ba_win_size, startseqnum)) {
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;

	rx_tid->dialogtoken = dialogtoken;
	rx_tid->startseqnum = startseqnum;

	/* A user-set status (dp_set_addba_response) overrides SUCCESS */
	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
		rx_tid->statuscode = rx_tid->userstatuscode;
	else
		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;

	qdf_spin_unlock_bh(&rx_tid->tid_lock);

	return QDF_STATUS_SUCCESS;
}
2510
2511/*
Gyanranjan Hazarika99a58d32017-12-22 21:56:17 -08002512* dp_set_addba_response() – Set a user defined ADDBA response status code
2513*
2514* @peer: Datapath peer handle
2515* @tid: TID number
2516* @statuscode: response status code to be set
2517*/
2518void dp_set_addba_response(void *peer_handle, uint8_t tid,
2519 uint16_t statuscode)
2520{
2521 struct dp_peer *peer = (struct dp_peer *)peer_handle;
2522 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2523
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002524 qdf_spin_lock_bh(&rx_tid->tid_lock);
Gyanranjan Hazarika99a58d32017-12-22 21:56:17 -08002525 rx_tid->userstatuscode = statuscode;
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002526 qdf_spin_unlock_bh(&rx_tid->tid_lock);
Gyanranjan Hazarika99a58d32017-12-22 21:56:17 -08002527}
2528
/*
 * dp_delba_process_wifi3() – Process DELBA from peer
 * @peer: Datapath peer handle
 * @tid: TID number
 * @reasoncode: Reason code received in DELBA frame
 *
 * Return: 0 on success, error code on failure
 */
int dp_delba_process_wifi3(void *peer_handle,
	int tid, uint16_t reasoncode)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	qdf_spin_lock_bh(&rx_tid->tid_lock);
	/* Nothing to tear down unless a session is fully active */
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		return QDF_STATUS_E_FAILURE;
	}
	/* TODO: See if we can delete the existing REO queue descriptor and
	 * replace with a new one without queue extenstion descript to save
	 * memory
	 */
	rx_tid->delba_rcode = reasoncode;
	rx_tid->num_of_delba_req++;
	/* Shrink the REO queue back to a 1-entry (non-BA) window */
	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);

	rx_tid->ba_status = DP_RX_BA_INACTIVE;
	peer->active_ba_session_cnt--;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
	return 0;
}
2562
/*
 * dp_delba_tx_completion_wifi3() – Handle tx completion of a DELBA frame,
 * retrying transmission on failure
 *
 * @peer: Datapath peer handle
 * @tid: TID number
 * @status: tx completion status
 * Return: 0 on success, error code on failure
 */
2571
int dp_delba_tx_completion_wifi3(void *peer_handle,
	uint8_t tid, int status)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	/* Peer may already be gone or under deletion by another context */
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		rx_tid->delba_tx_fail_cnt++;
		/* Give up after DP_MAX_DELBA_RETRY failed attempts */
		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
			rx_tid->delba_tx_retry = 0;
			rx_tid->delba_tx_status = 0;
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
		} else {
			rx_tid->delba_tx_retry++;
			rx_tid->delba_tx_status = 1;
			/* drop the lock before calling the control path */
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
				peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
				peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev,
				rx_tid->delba_rcode);
		}
		return QDF_STATUS_SUCCESS;
	} else {
		rx_tid->delba_tx_success_cnt++;
		rx_tid->delba_tx_retry = 0;
		rx_tid->delba_tx_status = 0;
	}
	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
		/* Shrink the REO queue back to a 1-entry (non-BA) window */
		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		peer->active_ba_session_cnt--;
	}
	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
	}
	qdf_spin_unlock_bh(&rx_tid->tid_lock);

	return QDF_STATUS_SUCCESS;
}
2619
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002620void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
2621 qdf_nbuf_t msdu_list)
2622{
2623 while (msdu_list) {
2624 qdf_nbuf_t msdu = msdu_list;
2625
2626 msdu_list = qdf_nbuf_next(msdu_list);
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05302627 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2628 "discard rx %pK from partly-deleted peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
2629 msdu, peer,
2630 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2631 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2632 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002633 qdf_nbuf_free(msdu);
2634 }
2635}
2636
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05302637
2638/**
2639 * dp_set_pn_check_wifi3() - enable PN check in REO for security
2640 * @peer: Datapath peer handle
2641 * @vdev: Datapath vdev
2642 * @pdev - data path device instance
2643 * @sec_type - security type
2644 * @rx_pn - Receive pn starting number
2645 *
2646 */
2647
void
dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	int i;
	uint8_t pn_size;
	struct hal_reo_cmd_params params;

	/* preconditions */
	qdf_assert(vdev);

	pdev = vdev->pdev;
	soc = pdev->soc;


	qdf_mem_zero(&params, sizeof(params));

	/* Common REO queue-update fields for every tid */
	params.std.need_status = 1;
	params.u.upd_queue_params.update_pn_valid = 1;
	params.u.upd_queue_params.update_pn_size = 1;
	params.u.upd_queue_params.update_pn = 1;
	params.u.upd_queue_params.update_pn_check_needed = 1;
	params.u.upd_queue_params.update_svld = 1;
	params.u.upd_queue_params.svld = 0;

	peer->security[dp_sec_ucast].sec_type = sec_type;

	/* Select PN size and parity rules based on the cipher */
	switch (sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 48;
		pn_size = 48;
		break;
	case cdp_sec_type_wapi:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 128;
		pn_size = 128;
		/* WAPI: AP side uses even PNs, non-AP uses odd PNs */
		if (vdev->opmode == wlan_op_mode_ap) {
			params.u.upd_queue_params.pn_even = 1;
			params.u.upd_queue_params.update_pn_even = 1;
		} else {
			params.u.upd_queue_params.pn_uneven = 1;
			params.u.upd_queue_params.update_pn_uneven = 1;
		}
		break;
	default:
		/* No PN check for open / unknown security types */
		params.u.upd_queue_params.pn_check_needed = 0;
		pn_size = 0;
		break;
	}


	/* Push the PN configuration into every tid's REO queue descriptor */
	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (rx_tid->hw_qdesc_vaddr_unaligned) {
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (pn_size) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO_HIGH,
					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
					  __func__, i, rx_pn[3], rx_pn[2],
					  rx_pn[1], rx_pn[0]);
				params.u.upd_queue_params.update_pn_valid = 1;
				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
			}
			rx_tid->pn_size = pn_size;
			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
				dp_rx_tid_update_cb, rx_tid);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
				  "PN Check not setup for TID :%d ", i);
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
}
2738
2739
void
dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
	enum cdp_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
	u_int32_t *rx_pn)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer;
	int sec_index;

	/* Takes a reference on the peer; released before returning */
	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Couldn't find peer from ID %d - skipping security inits",
			  peer_id);
		return;
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): %s key of type %d",
		  peer,
		  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
		  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
		  peer->mac_addr.raw[4], peer->mac_addr.raw[5],
		  is_unicast ? "ucast" : "mcast",
		  sec_type);
	/* Record the negotiated cipher for the matching key direction */
	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;
#ifdef notyet /* TODO: See if this is required for defrag support */
	/* michael key only valid for TKIP, but for simplicity,
	 * copy it anyway
	 */
	qdf_mem_copy(
		&peer->security[sec_index].michael_key[0],
		michael_key,
		sizeof(peer->security[sec_index].michael_key));
#ifdef BIG_ENDIAN_HOST
	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
		sizeof(peer->security[sec_index].michael_key));
#endif /* BIG_ENDIAN_HOST */
#endif

#ifdef notyet /* TODO: Check if this is required for wifi3.0 */
	if (sec_type != cdp_sec_type_wapi) {
		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
	} else {
		for (i = 0; i < DP_MAX_TIDS; i++) {
			/*
			 * Setting PN valid bit for WAPI sec_type,
			 * since WAPI PN has to be started with predefined value
			 */
			peer->tids_last_pn_valid[i] = 1;
			qdf_mem_copy(
				(u_int8_t *) &peer->tids_last_pn[i],
				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
			peer->tids_last_pn[i].pn128[1] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
			peer->tids_last_pn[i].pn128[0] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
		}
	}
#endif
	/* TODO: Update HW TID queue with PN check parameters (pn type for
	 * all security types and last pn for WAPI) once REO command API
	 * is available
	 */

	dp_peer_unref_del_find_by_id(peer);
}
2807
Pranita Solanke05862962019-01-09 11:39:29 +05302808#ifdef CONFIG_MCL
Leo Chang5ea93a42016-11-03 12:39:49 -07002809/**
2810 * dp_register_peer() - Register peer into physical device
2811 * @pdev - data path device instance
2812 * @sta_desc - peer description
2813 *
2814 * Register peer into physical device
2815 *
2816 * Return: QDF_STATUS_SUCCESS registration success
2817 * QDF_STATUS_E_FAULT peer not found
2818 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002819QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
Leo Chang5ea93a42016-11-03 12:39:49 -07002820 struct ol_txrx_desc_type *sta_desc)
2821{
2822 struct dp_peer *peer;
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002823 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07002824
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002825 peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
2826 sta_desc->sta_id);
Leo Chang5ea93a42016-11-03 12:39:49 -07002827 if (!peer)
2828 return QDF_STATUS_E_FAULT;
2829
2830 qdf_spin_lock_bh(&peer->peer_info_lock);
2831 peer->state = OL_TXRX_PEER_STATE_CONN;
2832 qdf_spin_unlock_bh(&peer->peer_info_lock);
2833
Sravan Kumar Kairamebd627e2018-08-28 23:32:52 +05302834 dp_rx_flush_rx_cached(peer, false);
2835
Leo Chang5ea93a42016-11-03 12:39:49 -07002836 return QDF_STATUS_SUCCESS;
2837}
2838
2839/**
2840 * dp_clear_peer() - remove peer from physical device
2841 * @pdev - data path device instance
2842 * @sta_id - local peer id
2843 *
2844 * remove peer from physical device
2845 *
2846 * Return: QDF_STATUS_SUCCESS registration success
2847 * QDF_STATUS_E_FAULT peer not found
2848 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002849QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
Leo Chang5ea93a42016-11-03 12:39:49 -07002850{
2851 struct dp_peer *peer;
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002852 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07002853
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002854 peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
Leo Chang5ea93a42016-11-03 12:39:49 -07002855 if (!peer)
2856 return QDF_STATUS_E_FAULT;
2857
2858 qdf_spin_lock_bh(&peer->peer_info_lock);
2859 peer->state = OL_TXRX_PEER_STATE_DISC;
2860 qdf_spin_unlock_bh(&peer->peer_info_lock);
2861
Sravan Kumar Kairamebd627e2018-08-28 23:32:52 +05302862 dp_rx_flush_rx_cached(peer, true);
2863
Leo Chang5ea93a42016-11-03 12:39:49 -07002864 return QDF_STATUS_SUCCESS;
2865}
2866
2867/**
2868 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
2869 * @pdev - data path device instance
2870 * @vdev - virtual interface instance
2871 * @peer_addr - peer mac address
2872 * @peer_id - local peer id with target mac address
2873 *
2874 * Find peer by peer mac address within vdev
2875 *
2876 * Return: peer instance void pointer
2877 * NULL cannot find target peer
2878 */
void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
		struct cdp_vdev *vdev_handle,
		uint8_t *peer_addr, uint8_t *local_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);

	if (!peer)
		return NULL;

	/* A peer with this mac exists but belongs to a different vdev */
	if (peer->vdev != vdev) {
		dp_peer_unref_delete(peer);
		return NULL;
	}

	*local_id = peer->local_id;

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	dp_peer_unref_delete(peer);

	/* NOTE(review): peer is returned after its reference was dropped;
	 * this relies on the caller's context preventing concurrent
	 * deletion - confirm against callers.
	 */
	return peer;
}
2906
2907/**
2908 * dp_local_peer_id() - Find local peer id within peer instance
2909 * @peer - peer instance
2910 *
2911 * Find local peer id within peer instance
2912 *
2913 * Return: local peer id
2914 */
2915uint16_t dp_local_peer_id(void *peer)
2916{
2917 return ((struct dp_peer *)peer)->local_id;
2918}
2919
2920/**
2921 * dp_peer_find_by_local_id() - Find peer by local peer id
2922 * @pdev - data path device instance
2923 * @local_peer_id - local peer id want to find
2924 *
2925 * Find peer by local peer id within physical device
2926 *
2927 * Return: peer instance void pointer
2928 * NULL cannot find target peer
2929 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002930void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
Leo Chang5ea93a42016-11-03 12:39:49 -07002931{
2932 struct dp_peer *peer;
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002933 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07002934
Ryan Hsu9d56e3a2018-06-06 16:20:05 -07002935 if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
Mohit Khanna890818b2018-07-23 11:41:08 -07002936 QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2937 "Incorrect local id %u", local_id);
Ryan Hsu9d56e3a2018-06-06 16:20:05 -07002938 return NULL;
2939 }
Leo Chang5ea93a42016-11-03 12:39:49 -07002940 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2941 peer = pdev->local_peer_ids.map[local_id];
2942 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Yun Park11d46e02017-11-27 10:51:53 -08002943 DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
Leo Chang5ea93a42016-11-03 12:39:49 -07002944 return peer;
2945}
2946
2947/**
2948 * dp_peer_state_update() - update peer local state
2949 * @pdev - data path device instance
2950 * @peer_addr - peer mac address
2951 * @state - new peer local state
2952 *
2953 * update peer local state
2954 *
2955 * Return: QDF_STATUS_SUCCESS registration success
2956 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002957QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
Leo Chang5ea93a42016-11-03 12:39:49 -07002958 enum ol_txrx_peer_state state)
2959{
2960 struct dp_peer *peer;
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002961 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07002962
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05302963 peer = dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
Jeff Johnsona8edf332019-03-18 09:51:52 -07002964 if (!peer) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05302965 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2966 "Failed to find peer for: [%pM]", peer_mac);
Ankit Gupta6fb389b2017-01-03 12:23:45 -08002967 return QDF_STATUS_E_FAILURE;
2968 }
Leo Chang5ea93a42016-11-03 12:39:49 -07002969 peer->state = state;
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08002970
Jeff Johnson3f217e22017-09-18 10:13:35 -07002971 DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08002972 /* ref_cnt is incremented inside dp_peer_find_hash_find().
2973 * Decrement it here.
2974 */
Chaithanya Garrepalli7c8cf122018-09-07 19:23:52 +05302975 dp_peer_unref_delete(peer);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08002976
Leo Chang5ea93a42016-11-03 12:39:49 -07002977 return QDF_STATUS_SUCCESS;
2978}
2979
2980/**
Jeff Johnsonff2dfb22018-05-12 10:27:57 -07002981 * dp_get_vdevid() - Get virtual interface id which peer registered
Leo Chang5ea93a42016-11-03 12:39:49 -07002982 * @peer - peer instance
Jeff Johnsonff2dfb22018-05-12 10:27:57 -07002983 * @vdev_id - virtual interface id which peer registered
Leo Chang5ea93a42016-11-03 12:39:49 -07002984 *
Jeff Johnsonff2dfb22018-05-12 10:27:57 -07002985 * Get virtual interface id which peer registered
Leo Chang5ea93a42016-11-03 12:39:49 -07002986 *
2987 * Return: QDF_STATUS_SUCCESS registration success
2988 */
2989QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2990{
2991 struct dp_peer *peer = peer_handle;
2992
Jeff Johnson3f217e22017-09-18 10:13:35 -07002993 DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
Leo Chang5ea93a42016-11-03 12:39:49 -07002994 peer, peer->vdev, peer->vdev->vdev_id);
2995 *vdev_id = peer->vdev->vdev_id;
2996 return QDF_STATUS_SUCCESS;
2997}
2998
Yun Park601d0d82017-08-28 21:49:31 -07002999struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
3000 uint8_t sta_id)
Yun Parkfde6b9e2017-06-26 17:13:11 -07003001{
Yun Park601d0d82017-08-28 21:49:31 -07003002 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Yun Parkfde6b9e2017-06-26 17:13:11 -07003003 struct dp_peer *peer = NULL;
Yun Parkfde6b9e2017-06-26 17:13:11 -07003004
3005 if (sta_id >= WLAN_MAX_STA_COUNT) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05303006 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Yun Parkfde6b9e2017-06-26 17:13:11 -07003007 "Invalid sta id passed");
3008 return NULL;
3009 }
3010
Yun Parkfde6b9e2017-06-26 17:13:11 -07003011 if (!pdev) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05303012 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Yun Parkfde6b9e2017-06-26 17:13:11 -07003013 "PDEV not found for sta_id [%d]", sta_id);
3014 return NULL;
3015 }
3016
3017 peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
3018 if (!peer) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05303019 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Yun Parkfde6b9e2017-06-26 17:13:11 -07003020 "PEER [%d] not found", sta_id);
3021 return NULL;
3022 }
3023
3024 return (struct cdp_vdev *)peer->vdev;
3025}
3026
Leo Chang5ea93a42016-11-03 12:39:49 -07003027/**
3028 * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
3029 * @peer - peer instance
3030 *
3031 * Get virtual interface instance which peer belongs
3032 *
3033 * Return: virtual interface instance pointer
3034 * NULL in case cannot find
3035 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003036struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
Leo Chang5ea93a42016-11-03 12:39:49 -07003037{
3038 struct dp_peer *peer = peer_handle;
3039
Mohit Khanna7ac554b2018-05-24 11:58:13 -07003040 DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003041 return (struct cdp_vdev *)peer->vdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07003042}
3043
3044/**
3045 * dp_peer_get_peer_mac_addr() - Get peer mac address
3046 * @peer - peer instance
3047 *
3048 * Get peer mac address
3049 *
3050 * Return: peer mac address pointer
3051 * NULL in case cannot find
3052 */
3053uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
3054{
3055 struct dp_peer *peer = peer_handle;
3056 uint8_t *mac;
3057
3058 mac = peer->mac_addr.raw;
Jeff Johnson3f217e22017-09-18 10:13:35 -07003059 DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
Leo Chang5ea93a42016-11-03 12:39:49 -07003060 peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3061 return peer->mac_addr.raw;
3062}
3063
3064/**
3065 * dp_get_peer_state() - Get local peer state
3066 * @peer - peer instance
3067 *
3068 * Get local peer state
3069 *
3070 * Return: peer status
3071 */
3072int dp_get_peer_state(void *peer_handle)
3073{
3074 struct dp_peer *peer = peer_handle;
3075
Yun Park11d46e02017-11-27 10:51:53 -08003076 DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state);
Leo Chang5ea93a42016-11-03 12:39:49 -07003077 return peer->state;
3078}
3079
3080/**
3081 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
3082 * @pdev - data path device instance
3083 *
3084 * local peer id pool alloc for physical device
3085 *
3086 * Return: none
3087 */
3088void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
3089{
3090 int i;
3091
3092 /* point the freelist to the first ID */
3093 pdev->local_peer_ids.freelist = 0;
3094
3095 /* link each ID to the next one */
3096 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
3097 pdev->local_peer_ids.pool[i] = i + 1;
3098 pdev->local_peer_ids.map[i] = NULL;
3099 }
3100
3101 /* link the last ID to itself, to mark the end of the list */
3102 i = OL_TXRX_NUM_LOCAL_PEER_IDS;
3103 pdev->local_peer_ids.pool[i] = i;
3104
3105 qdf_spinlock_create(&pdev->local_peer_ids.lock);
3106 DP_TRACE(INFO, "Peer pool init");
3107}
3108
3109/**
3110 * dp_local_peer_id_alloc() - allocate local peer id
3111 * @pdev - data path device instance
3112 * @peer - new peer instance
3113 *
3114 * allocate local peer id
3115 *
3116 * Return: none
3117 */
3118void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3119{
3120 int i;
3121
3122 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3123 i = pdev->local_peer_ids.freelist;
3124 if (pdev->local_peer_ids.pool[i] == i) {
3125 /* the list is empty, except for the list-end marker */
3126 peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3127 } else {
3128 /* take the head ID and advance the freelist */
3129 peer->local_id = i;
3130 pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3131 pdev->local_peer_ids.map[i] = peer;
3132 }
3133 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Jeff Johnson3f217e22017-09-18 10:13:35 -07003134 DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
Leo Chang5ea93a42016-11-03 12:39:49 -07003135}
3136
3137/**
3138 * dp_local_peer_id_free() - remove local peer id
3139 * @pdev - data path device instance
3140 * @peer - peer instance should be removed
3141 *
3142 * remove local peer id
3143 *
3144 * Return: none
3145 */
3146void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3147{
3148 int i = peer->local_id;
3149 if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3150 (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3151 return;
3152 }
3153
3154 /* put this ID on the head of the freelist */
3155 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3156 pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3157 pdev->local_peer_ids.freelist = i;
3158 pdev->local_peer_ids.map[i] = NULL;
3159 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3160}
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05303161#endif
Ishank Jain1e7401c2017-02-17 15:38:39 +05303162
3163/**
3164 * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
3165 * @soc_handle: DP SOC handle
3166 * @peer_id:peer_id of the peer
3167 *
3168 * return: vdev_id of the vap
3169 */
3170uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
3171 uint16_t peer_id, uint8_t *peer_mac)
3172{
3173 struct dp_soc *soc = (struct dp_soc *)soc_handle;
3174 struct dp_peer *peer;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05303175 uint8_t vdev_id;
Ishank Jain1e7401c2017-02-17 15:38:39 +05303176
3177 peer = dp_peer_find_by_id(soc, peer_id);
3178
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05303179 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3180 "soc %pK peer_id %d", soc, peer_id);
Ishank Jain1e7401c2017-02-17 15:38:39 +05303181
3182 if (!peer) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05303183 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3184 "peer not found ");
Ishank Jain1e7401c2017-02-17 15:38:39 +05303185 return CDP_INVALID_VDEV_ID;
3186 }
3187
3188 qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05303189 vdev_id = peer->vdev->vdev_id;
3190
3191 dp_peer_unref_del_find_by_id(peer);
3192
3193 return vdev_id;
Ishank Jain1e7401c2017-02-17 15:38:39 +05303194}
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07003195
/**
 * dp_peer_rxtid_stats: Retried Rx TID (REO queue) stats from HW
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: REO command callback function invoked with the stats;
 *                   note the unprototyped function-pointer parameter - the
 *                   exact signature is defined by the REO command layer
 * @cb_ctxt: Callback context; when NULL the rx_tid itself is passed to the
 *           callback instead
 *
 * For every TID that has a hardware queue descriptor, issue a
 * CMD_GET_QUEUE_STATS REO command followed by a CMD_FLUSH_CACHE so the
 * descriptor memory reflects current HW state.
 *
 * Return: none
 */
void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
	void *cb_ctxt)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	int i;

	/* nothing to do without a callback to deliver the stats to */
	if (!dp_stats_cmd_cb)
		return;

	qdf_mem_zero(&params, sizeof(params));
	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
		/* only TIDs with an allocated HW queue descriptor */
		if (rx_tid->hw_qdesc_vaddr_unaligned) {
			params.std.need_status = 1;
			/* split the 64-bit descriptor PA into lo/hi words */
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (cb_ctxt) {
				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
					&params, dp_stats_cmd_cb, cb_ctxt);
			} else {
				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
					&params, dp_stats_cmd_cb, rx_tid);
			}

			/* Flush REO descriptor from HW cache to update stats
			 * in descriptor memory. This is to help debugging */
			qdf_mem_zero(&params, sizeof(params));
			params.std.need_status = 0;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			/* flush without invalidating the cached entry */
			params.u.fl_cache_params.flush_no_inval = 1;
			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
				NULL);
		}
	}
}
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05303246
Pramod Simha6e10cb22018-06-20 12:05:44 -07003247void dp_set_michael_key(struct cdp_peer *peer_handle,
3248 bool is_unicast, uint32_t *key)
3249{
3250 struct dp_peer *peer = (struct dp_peer *)peer_handle;
3251 uint8_t sec_index = is_unicast ? 1 : 0;
3252
3253 if (!peer) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05303254 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Pramod Simha6e10cb22018-06-20 12:05:44 -07003255 "peer not found ");
3256 return;
3257 }
3258
3259 qdf_mem_copy(&peer->security[sec_index].michael_key[0],
3260 key, IEEE80211_WEP_MICLEN);
3261}
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05303262
3263bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
3264{
3265 struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
3266
3267 if (peer) {
3268 /*
3269 * Decrement the peer ref which is taken as part of
3270 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
3271 */
3272 dp_peer_unref_del_find_by_id(peer);
3273
3274 return true;
3275 }
3276
3277 return false;
3278}