blob: 72886fc9f41d15064ddf939a95e4602a88a57b4c [file] [log] [blame]
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001/*
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05302 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
Harilakshmi Deshkumar1ea21092017-05-08 21:16:27 +053016 * PERFORMANCE OF THIS SOFTWARE.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070017 */
18
19#include <qdf_types.h>
20#include <qdf_lock.h>
Balamurugan Mahalingamf72cb1f2018-06-25 12:18:34 +053021#include <hal_hw_headers.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070022#include "dp_htt.h"
23#include "dp_types.h"
24#include "dp_internal.h"
Jeff Johnson2cb8fc72016-12-17 10:45:08 -080025#include "dp_peer.h"
Lin Baif1c577e2018-05-22 20:45:42 +080026#include "dp_rx_defrag.h"
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070027#include <hal_api.h>
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -080028#include <hal_reo.h>
Venkata Sharath Chandra Manchala8e8d8f12017-01-13 00:00:58 -080029#ifdef CONFIG_MCL
30#include <cds_ieee80211_common.h>
Yun Parkfde6b9e2017-06-26 17:13:11 -070031#include <cds_api.h>
Venkata Sharath Chandra Manchala8e8d8f12017-01-13 00:00:58 -080032#endif
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080033#include <cdp_txrx_handle.h>
Ravi Joshiaf9ace82017-02-17 12:41:48 -080034#include <wlan_cfg.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070035
Pramod Simhab17d0672017-03-06 17:20:13 -080036#ifdef DP_LFR
37static inline void
38dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
39 uint8_t valid)
40{
41 params->u.upd_queue_params.update_svld = 1;
42 params->u.upd_queue_params.svld = valid;
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +053043 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
44 "%s: Setting SSN valid bit to %d",
45 __func__, valid);
Pramod Simhab17d0672017-03-06 17:20:13 -080046}
47#else
48static inline void
49dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
50 uint8_t valid) {};
51#endif
52
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070053static inline int dp_peer_find_mac_addr_cmp(
54 union dp_align_mac_addr *mac_addr1,
55 union dp_align_mac_addr *mac_addr2)
56{
57 return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
58 /*
59 * Intentionally use & rather than &&.
60 * because the operands are binary rather than generic boolean,
61 * the functionality is equivalent.
62 * Using && has the advantage of short-circuited evaluation,
63 * but using & has the advantage of no conditional branching,
64 * which is a more significant benefit.
65 */
66 &
67 (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
68}
69
Tallapragada Kalyanc7413082019-03-07 21:22:10 +053070static int dp_peer_ast_table_attach(struct dp_soc *soc)
71{
72 uint32_t max_ast_index;
73
74 max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
75 /* allocate ast_table for ast entry to ast_index map */
76 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
77 "\n<=== cfg max ast idx %d ====>", max_ast_index);
78 soc->ast_table = qdf_mem_malloc(max_ast_index *
79 sizeof(struct dp_ast_entry *));
80 if (!soc->ast_table) {
81 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
82 "%s: ast_table memory allocation failed", __func__);
83 return QDF_STATUS_E_NOMEM;
84 }
85 return 0; /* success */
86}
87
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070088static int dp_peer_find_map_attach(struct dp_soc *soc)
89{
90 uint32_t max_peers, peer_map_size;
91
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +053092 max_peers = soc->max_peers;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070093 /* allocate the peer ID -> peer object map */
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +053094 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
95 "\n<=== cfg max peer id %d ====>", max_peers);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070096 peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
97 soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
98 if (!soc->peer_id_to_obj_map) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +053099 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
100 "%s: peer map memory allocation failed", __func__);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700101 return QDF_STATUS_E_NOMEM;
102 }
103
104 /*
105 * The peer_id_to_obj_map doesn't really need to be initialized,
106 * since elements are only used after they have been individually
107 * initialized.
108 * However, it is convenient for debugging to have all elements
109 * that are not in use set to 0.
110 */
111 qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700112 return 0; /* success */
113}
114
/*
 * dp_log2_ceil() - Compute ceil(log2(value))
 * @value: input value
 *
 * Return: smallest n such that (1 << n) >= value; 0 for value 0 or 1
 */
static int dp_log2_ceil(unsigned value)
{
	unsigned tmp = value;
	int log2 = -1;

	while (tmp) {
		log2++;
		tmp >>= 1;
	}

	/*
	 * For value == 0 the loop never runs and log2 stays -1; the
	 * original code then evaluated (1 << -1), which is undefined
	 * behavior (CERT INT34-C). Treat 0 like 1 and return 0.
	 */
	if (log2 < 0)
		return 0;

	/* round up when value is not an exact power of two */
	if ((1U << log2) != value)
		log2++;

	return log2;
}
128
129static int dp_peer_find_add_id_to_obj(
130 struct dp_peer *peer,
131 uint16_t peer_id)
132{
133 int i;
134
135 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
136 if (peer->peer_ids[i] == HTT_INVALID_PEER) {
137 peer->peer_ids[i] = peer_id;
138 return 0; /* success */
139 }
140 }
141 return QDF_STATUS_E_FAILURE; /* failure */
142}
143
144#define DP_PEER_HASH_LOAD_MULT 2
145#define DP_PEER_HASH_LOAD_SHIFT 0
146
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530147#define DP_AST_HASH_LOAD_MULT 2
148#define DP_AST_HASH_LOAD_SHIFT 0
149
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700150static int dp_peer_find_hash_attach(struct dp_soc *soc)
151{
152 int i, hash_elems, log2;
153
154 /* allocate the peer MAC address -> peer object hash table */
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +0530155 hash_elems = soc->max_peers;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700156 hash_elems *= DP_PEER_HASH_LOAD_MULT;
157 hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
158 log2 = dp_log2_ceil(hash_elems);
159 hash_elems = 1 << log2;
160
161 soc->peer_hash.mask = hash_elems - 1;
162 soc->peer_hash.idx_bits = log2;
163 /* allocate an array of TAILQ peer object lists */
164 soc->peer_hash.bins = qdf_mem_malloc(
165 hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
166 if (!soc->peer_hash.bins)
167 return QDF_STATUS_E_NOMEM;
168
169 for (i = 0; i < hash_elems; i++)
170 TAILQ_INIT(&soc->peer_hash.bins[i]);
171
172 return 0;
173}
174
/*
 * dp_peer_find_hash_detach() - Free the peer MAC -> peer hash table
 * @soc: SoC handle
 *
 * Frees only the bin array; entries still linked into the bins are not
 * touched here.
 *
 * Return: None
 */
static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_hash.bins);
}
179
180static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
181 union dp_align_mac_addr *mac_addr)
182{
183 unsigned index;
184
185 index =
186 mac_addr->align2.bytes_ab ^
187 mac_addr->align2.bytes_cd ^
188 mac_addr->align2.bytes_ef;
189 index ^= index >> soc->peer_hash.idx_bits;
190 index &= soc->peer_hash.mask;
191 return index;
192}
193
194
195void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
196{
197 unsigned index;
198
199 index = dp_peer_find_hash_index(soc, &peer->mac_addr);
200 qdf_spin_lock_bh(&soc->peer_ref_mutex);
201 /*
202 * It is important to add the new peer at the tail of the peer list
203 * with the bin index. Together with having the hash_find function
204 * search from head to tail, this ensures that if two entries with
205 * the same MAC address are stored, the one added first will be
206 * found first.
207 */
208 TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
209 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
210}
211
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +0530212#ifdef FEATURE_AST
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530213/*
214 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
215 * @soc: SoC handle
216 *
217 * Return: None
218 */
219static int dp_peer_ast_hash_attach(struct dp_soc *soc)
220{
221 int i, hash_elems, log2;
Tallapragada Kalyanc7413082019-03-07 21:22:10 +0530222 unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530223
Tallapragada Kalyanc7413082019-03-07 21:22:10 +0530224 hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530225 DP_AST_HASH_LOAD_SHIFT);
226
227 log2 = dp_log2_ceil(hash_elems);
228 hash_elems = 1 << log2;
229
230 soc->ast_hash.mask = hash_elems - 1;
231 soc->ast_hash.idx_bits = log2;
232
Tallapragada Kalyanc7413082019-03-07 21:22:10 +0530233 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
234 "ast hash_elems: %d, max_ast_idx: %d",
235 hash_elems, max_ast_idx);
236
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530237 /* allocate an array of TAILQ peer object lists */
238 soc->ast_hash.bins = qdf_mem_malloc(
239 hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
240 dp_ast_entry)));
241
242 if (!soc->ast_hash.bins)
243 return QDF_STATUS_E_NOMEM;
244
245 for (i = 0; i < hash_elems; i++)
246 TAILQ_INIT(&soc->ast_hash.bins[i]);
247
248 return 0;
249}
250
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530251/*
252 * dp_peer_ast_cleanup() - cleanup the references
253 * @soc: SoC handle
254 * @ast: ast entry
255 *
256 * Return: None
257 */
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530258static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
259 struct dp_ast_entry *ast)
260{
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530261 txrx_ast_free_cb cb = ast->callback;
262 void *cookie = ast->cookie;
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530263
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530264 /* Call the callbacks to free up the cookie */
265 if (cb) {
266 ast->callback = NULL;
267 ast->cookie = NULL;
268 cb(soc->ctrl_psoc,
269 soc,
270 cookie,
271 CDP_TXRX_AST_DELETE_IN_PROGRESS);
272 }
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530273}
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530274
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530275/*
276 * dp_peer_ast_hash_detach() - Free AST Hash table
277 * @soc: SoC handle
278 *
279 * Return: None
280 */
281static void dp_peer_ast_hash_detach(struct dp_soc *soc)
282{
Chaithanya Garrepalli157543d2018-07-09 17:42:59 +0530283 unsigned int index;
284 struct dp_ast_entry *ast, *ast_next;
285
286 if (!soc->ast_hash.mask)
287 return;
288
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530289 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepalli157543d2018-07-09 17:42:59 +0530290 for (index = 0; index <= soc->ast_hash.mask; index++) {
291 if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
292 TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
293 hash_list_elem, ast_next) {
294 TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
295 hash_list_elem);
296 dp_peer_ast_cleanup(soc, ast);
297 qdf_mem_free(ast);
298 }
299 }
300 }
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530301 qdf_spin_unlock_bh(&soc->ast_lock);
Chaithanya Garrepalli157543d2018-07-09 17:42:59 +0530302
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530303 qdf_mem_free(soc->ast_hash.bins);
304}
305
306/*
307 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
308 * @soc: SoC handle
309 *
310 * Return: AST hash
311 */
312static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
313 union dp_align_mac_addr *mac_addr)
314{
315 uint32_t index;
316
317 index =
318 mac_addr->align2.bytes_ab ^
319 mac_addr->align2.bytes_cd ^
320 mac_addr->align2.bytes_ef;
321 index ^= index >> soc->ast_hash.idx_bits;
322 index &= soc->ast_hash.mask;
323 return index;
324}
325
326/*
327 * dp_peer_ast_hash_add() - Add AST entry into hash table
328 * @soc: SoC handle
329 *
330 * This function adds the AST entry into SoC AST hash table
331 * It assumes caller has taken the ast lock to protect the access to this table
332 *
333 * Return: None
334 */
335static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
336 struct dp_ast_entry *ase)
337{
338 uint32_t index;
339
340 index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
341 TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
342}
343
344/*
345 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
346 * @soc: SoC handle
347 *
348 * This function removes the AST entry from soc AST hash table
349 * It assumes caller has taken the ast lock to protect the access to this table
350 *
351 * Return: None
352 */
353static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
354 struct dp_ast_entry *ase)
355{
356 unsigned index;
357 struct dp_ast_entry *tmpase;
358 int found = 0;
359
360 index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
361 /* Check if tail is not empty before delete*/
362 QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
363
364 TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
365 if (tmpase == ase) {
366 found = 1;
367 break;
368 }
369 }
370
371 QDF_ASSERT(found);
372 TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
373}
374
375/*
Chaithanya Garrepalli7c8cf122018-09-07 19:23:52 +0530376 * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
377 * @soc: SoC handle
378 * @peer: peer handle
379 * @ast_mac_addr: mac address
380 *
381 * It assumes caller has taken the ast lock to protect the access to ast list
382 *
383 * Return: AST entry
384 */
385struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
386 struct dp_peer *peer,
387 uint8_t *ast_mac_addr)
388{
389 struct dp_ast_entry *ast_entry = NULL;
390 union dp_align_mac_addr *mac_addr =
391 (union dp_align_mac_addr *)ast_mac_addr;
392
393 TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
394 if (!dp_peer_find_mac_addr_cmp(mac_addr,
395 &ast_entry->mac_addr)) {
396 return ast_entry;
397 }
398 }
399
400 return NULL;
401}
402
403/*
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530404 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530405 * @soc: SoC handle
Chaithanya Garrepallid203e2d2018-09-18 14:23:17 +0530406 *
407 * It assumes caller has taken the ast lock to protect the access to
408 * AST hash table
409 *
410 * Return: AST entry
411 */
412struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
413 uint8_t *ast_mac_addr,
414 uint8_t pdev_id)
415{
416 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
417 uint32_t index;
418 struct dp_ast_entry *ase;
419
420 qdf_mem_copy(&local_mac_addr_aligned.raw[0],
421 ast_mac_addr, DP_MAC_ADDR_LEN);
422 mac_addr = &local_mac_addr_aligned;
423
424 index = dp_peer_ast_hash_index(soc, mac_addr);
425 TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
426 if ((pdev_id == ase->pdev_id) &&
427 !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
428 return ase;
429 }
430 }
431
432 return NULL;
433}
434
435/*
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530436 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530437 * @soc: SoC handle
438 *
439 * It assumes caller has taken the ast lock to protect the access to
440 * AST hash table
441 *
442 * Return: AST entry
443 */
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530444struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
445 uint8_t *ast_mac_addr)
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530446{
447 union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
448 unsigned index;
449 struct dp_ast_entry *ase;
450
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530451 qdf_mem_copy(&local_mac_addr_aligned.raw[0],
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530452 ast_mac_addr, DP_MAC_ADDR_LEN);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530453 mac_addr = &local_mac_addr_aligned;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530454
455 index = dp_peer_ast_hash_index(soc, mac_addr);
456 TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
457 if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
458 return ase;
459 }
460 }
461
462 return NULL;
463}
464
/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 * @ast_hash: ast hash value in HW
 *
 * Binds the SW AST entry (created earlier by dp_peer_add_ast()) to the
 * HW index reported by the target, then notifies the control path.
 *
 * Return: None
 */
static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id, uint16_t ast_hash)
{
	struct dp_ast_entry *ast_entry = NULL;
	/* type reported to control path; STATIC unless a SW entry exists */
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;

	if (!peer) {
		return;
	}

	/* NOTE(review): informational log emitted at ERROR level —
	 * presumably intentional for field debugging; confirm.
	 */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
		__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
		mac_addr[1], mac_addr[2], mac_addr[3],
		mac_addr[4], mac_addr[5]);

	qdf_spin_lock_bh(&soc->ast_lock);

	/* look up the SW AST entry on this peer's list */
	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);

	if (ast_entry) {
		/* record the HW index and publish in the idx -> entry table;
		 * hw_peer_id is assumed to be within ast_table bounds —
		 * TODO confirm against dp_peer_ast_table_attach() sizing.
		 */
		ast_entry->ast_idx = hw_peer_id;
		soc->ast_table[hw_peer_id] = ast_entry;
		ast_entry->is_active = TRUE;
		peer_type = ast_entry->type;
		ast_entry->ast_hash_value = ast_hash;
		ast_entry->is_mapped = TRUE;
	}

	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
		/* notify the control path of the mapping, if registered */
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
			soc->ctrl_psoc, peer->peer_ids[0],
			hw_peer_id, vdev_id,
			mac_addr, peer_type, ast_hash);
		}
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"AST entry not found");
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return;
}
521
/*
 * dp_peer_free_hmwds_cb() - AST free callback that re-adds an HMWDS
 *			     entry once the old entry is deleted in target
 * @ctrl_psoc: control-path psoc handle (unused here)
 * @dp_soc: DP SoC handle (opaque pointer)
 * @cookie: struct dp_ast_free_cb_params saved by dp_peer_add_ast()
 * @status: result of the delete request
 *
 * Ownership: always frees @cookie before returning.
 *
 * Return: None
 */
void dp_peer_free_hmwds_cb(void *ctrl_psoc,
			   void *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status)
{
	struct dp_ast_free_cb_params *param =
		(struct dp_ast_free_cb_params *)cookie;
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_peer *peer = NULL;

	/* only re-add when the old entry was actually deleted */
	if (status != CDP_TXRX_AST_DELETED) {
		qdf_mem_free(cookie);
		return;
	}

	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
				      0, param->vdev_id);
	if (peer) {
		/* re-create the AST entry with the saved parameters */
		dp_peer_add_ast(soc, peer,
				&param->mac_addr.raw[0],
				param->type,
				param->flags);
		/* drop the reference taken by dp_peer_find_hash_find() */
		dp_peer_unref_delete(peer);
	}
	qdf_mem_free(cookie);
}
Kiran Venkatappa74e6d8b2018-11-05 15:02:29 +0530548
/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: AST entry type (static/self/WDS/HMWDS/HMWDS-sec/MEC/DA)
 * @flags: flags forwarded to the control path peer_add_wds_entry hook
 *
 * This API is used by WDS source port learning function to
 * add a new AST entry into peer AST list
 *
 * Return: 0 if new entry is allocated,
 *	   -1 if entry add failed
 */
int dp_peer_add_ast(struct dp_soc *soc,
	struct dp_peer *peer,
	uint8_t *mac_addr,
	enum cdp_txrx_ast_entry_type type,
	uint32_t flags)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	uint8_t next_node_mac[6];
	int ret = -1;
	/* saved callback/cookie of a replaced entry; fired after unlock */
	txrx_ast_free_cb cb = NULL;
	void *cookie = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	/* do not add entries for a peer that is being torn down */
	if (peer->delete_in_progress) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return ret;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Peers vdev is NULL"));
		QDF_ASSERT(0);
		qdf_spin_unlock_bh(&soc->ast_lock);
		return ret;
	}

	pdev = vdev->pdev;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		"%s: pdevid: %u vdev: %u ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
		__func__, pdev->pdev_id, vdev->vdev_id, type, flags,
		peer->mac_addr.raw, peer, mac_addr);


	/* fw supports only 2 times the max_peers ast entries */
	if (soc->num_ast_entries >=
	    wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Max ast entries reached"));
		return ret;
	}

	/* If an AST entry already exists, just return from here.
	 * AST entries with the same mac address can exist on different
	 * radios; if ast_override support is enabled, search by pdev in
	 * this case.
	 */
	if (soc->ast_override_support) {
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
							    pdev->pdev_id);
		if (ast_entry) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return 0;
		}
	} else {
		/* HMWDS_SEC entries can be added for the same mac address;
		 * do not check for an existing entry
		 */
		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
			goto add_ast_entry;

		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);

		if (ast_entry) {
			/* a fresh MEC add refreshes an existing MEC entry */
			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
				ast_entry->is_active = TRUE;

			/* live HMWDS entry already present — nothing to do */
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
			    !ast_entry->delete_in_progress) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return 0;
			}

			/* An HMWDS add cannot be ignored if there is an
			 * AST entry with the same mac address.
			 *
			 * If an ast entry exists with the requested mac
			 * address, send a delete command and register a
			 * callback which can take care of adding the HMWDS
			 * ast entry on delete confirmation from target.
			 */
			if ((type == CDP_TXRX_AST_TYPE_WDS_HM) &&
			    soc->is_peer_map_unmap_v2) {
				struct dp_ast_free_cb_params *param = NULL;

				if (ast_entry->type ==
					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
					goto add_ast_entry;

				/* save existing callback */
				if (ast_entry->callback) {
					cb = ast_entry->callback;
					cookie = ast_entry->cookie;
				}

				param = qdf_mem_malloc(sizeof(*param));
				if (!param) {
					QDF_TRACE(QDF_MODULE_ID_TXRX,
						  QDF_TRACE_LEVEL_ERROR,
						  "Allocation failed");
					qdf_spin_unlock_bh(&soc->ast_lock);
					return ret;
				}

				/* stash the add parameters; the callback
				 * replays them (see dp_peer_free_hmwds_cb)
				 * and owns freeing param
				 */
				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
					     DP_MAC_ADDR_LEN);
				qdf_mem_copy(&param->peer_mac_addr.raw[0],
					     &peer->mac_addr.raw[0],
					     DP_MAC_ADDR_LEN);
				param->type = type;
				param->flags = flags;
				param->vdev_id = vdev->vdev_id;
				ast_entry->callback = dp_peer_free_hmwds_cb;
				ast_entry->pdev_id = vdev->pdev->pdev_id;
				ast_entry->type = type;
				ast_entry->cookie = (void *)param;
				if (!ast_entry->delete_in_progress)
					dp_peer_del_ast(soc, ast_entry);
			}

			/* Modify an already existing AST entry from type
			 * WDS to MEC on promotion. This serves as a fix when
			 * backbone of interfaces are interchanged wherein
			 * wds entry becomes its own MEC. The entry should be
			 * replaced only when the ast_entry peer matches the
			 * peer received in mec event. This additional check
			 * is needed in wds repeater cases where a multicast
			 * packet from station to the root via the repeater
			 * should not remove the wds entry.
			 */
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
			    (type == CDP_TXRX_AST_TYPE_MEC) &&
			    (ast_entry->peer == peer)) {
				ast_entry->is_active = FALSE;
				dp_peer_del_ast(soc, ast_entry);
			}
			qdf_spin_unlock_bh(&soc->ast_lock);

			/* Call the saved callback (outside the ast lock) */
			if (cb) {
				cb(soc->ctrl_psoc, soc, cookie,
				   CDP_TXRX_AST_DELETE_IN_PROGRESS);
			}
			return 0;
		}
	}

add_ast_entry:
	ast_entry = (struct dp_ast_entry *)
			qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("fail to allocate ast_entry"));
		QDF_ASSERT(0);
		return ret;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->vdev_id = vdev->vdev_id;
	/* is_mapped becomes true once the target confirms via peer map */
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
		/* STA-mode self entry is tracked as STA_BSS instead */
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_SELF:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
		break;
	case CDP_TXRX_AST_TYPE_MEC:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
		break;
	case CDP_TXRX_AST_TYPE_DA:
		/* DA entries are owned by the vdev's BSS peer */
		peer = peer->vdev->vap_bss_peer;
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Incorrect AST entry type"));
	}

	ast_entry->is_active = TRUE;
	DP_STATS_INC(soc, ast.added, 1);
	soc->num_ast_entries++;
	dp_peer_ast_hash_add(soc, ast_entry);

	/* note: for DA entries, peer was redirected to vap_bss_peer above */
	ast_entry->peer = peer;

	if (type == CDP_TXRX_AST_TYPE_MEC)
		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
	else
		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);

	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
	qdf_spin_unlock_bh(&soc->ast_lock);

	/* self/static/bss/HMWDS-sec entries are not pushed to target */
	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
		if (QDF_STATUS_SUCCESS ==
				soc->cdp_soc.ol_ops->peer_add_wds_entry(
				peer->vdev->osif_vdev,
				(struct cdp_peer *)peer,
				mac_addr,
				next_node_mac,
				flags))
			return 0;
	}

	return ret;
}
800
/*
 * dp_peer_del_ast() - Delete and free AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function removes the AST entry from peer and soc tables.
 * It assumes the caller has taken the ast lock to protect the access
 * to these tables.
 *
 * Return: None
 */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = ast_entry->peer;
	/* Snapshot the primary peer id before notifying the target */
	uint16_t peer_id = peer->peer_ids[0];

	/* Ask the target to remove its copy; also marks
	 * ast_entry->delete_in_progress.
	 */
	dp_peer_ast_send_wds_del(soc, ast_entry);

	/*
	 * If peer map v2 is enabled we do not free the AST entry here;
	 * it is freed in the unmap event (after we receive delete
	 * confirmation from the target).
	 *
	 * If peer_id is invalid we did not get the peer map event for
	 * this peer, so free the AST entry here only in that case.
	 */
	if (soc->is_peer_map_unmap_v2 && (peer_id != HTT_INVALID_PEER)) {

		/*
		 * For HM_SEC and SELF types we do not receive an unmap
		 * event, so free the ast_entry right here.
		 */
		if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
			return;
	}

	/*
	 * release the hardware-index reference only if the entry was
	 * actually mapped to ast_table
	 */
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;
	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);

	/* Clear the cached self-AST pointer if this was it */
	if (ast_entry == peer->self_ast_entry)
		peer->self_ast_entry = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);
	dp_peer_ast_cleanup(soc, ast_entry);
	qdf_mem_free(ast_entry);
	soc->num_ast_entries--;
}
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530855
856/*
857 * dp_peer_update_ast() - Delete and free AST entry
858 * @soc: SoC handle
859 * @peer: peer to which ast node belongs
860 * @ast_entry: AST entry of the node
861 * @flags: wds or hmwds
862 *
863 * This function update the AST entry to the roamed peer and soc tables
864 * It assumes caller has taken the ast lock to protect the access to these
865 * tables
866 *
867 * Return: 0 if ast entry is updated successfully
Ruchi, Agrawald6ba7ae2018-02-23 16:54:58 +0530868 * -1 failure
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530869 */
870int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
871 struct dp_ast_entry *ast_entry, uint32_t flags)
872{
Ruchi, Agrawald6ba7ae2018-02-23 16:54:58 +0530873 int ret = -1;
Tallapragada Kalyan7a47aac2018-02-28 22:01:59 +0530874 struct dp_peer *old_peer;
875
phadimand2e88e32019-01-23 12:58:43 +0530876 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
phadimane9fb5472018-10-30 16:53:05 +0530877 "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
878 __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
879 peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
880 peer->mac_addr.raw);
881
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530882 if (ast_entry->delete_in_progress)
883 return ret;
884
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530885 if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
Tallapragada Kalyan5e3a39c2018-08-24 16:34:12 +0530886 (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
887 (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
888 (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530889 return 0;
Chaithanya Garrepalli4c7099f2018-03-23 12:20:18 +0530890
Tallapragada Kalyan7a47aac2018-02-28 22:01:59 +0530891 old_peer = ast_entry->peer;
892 TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530893
894 ast_entry->peer = peer;
Tallapragada Kalyan7a47aac2018-02-28 22:01:59 +0530895 ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
896 ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
897 ast_entry->vdev_id = peer->vdev->vdev_id;
898 ast_entry->is_active = TRUE;
899 TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
900
Pamidipati, Vijayd578db12018-04-09 23:03:12 +0530901 ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530902 peer->vdev->osif_vdev,
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530903 ast_entry->mac_addr.raw,
904 peer->mac_addr.raw,
Pamidipati, Vijayd578db12018-04-09 23:03:12 +0530905 flags);
Chaithanya Garrepalli4c7099f2018-03-23 12:20:18 +0530906
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530907 return ret;
908}
909
910/*
911 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
912 * @soc: SoC handle
913 * @ast_entry: AST entry of the node
914 *
915 * This function gets the pdev_id from the ast entry.
916 *
917 * Return: (uint8_t) pdev_id
918 */
919uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
920 struct dp_ast_entry *ast_entry)
921{
922 return ast_entry->pdev_id;
923}
924
925/*
926 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
927 * @soc: SoC handle
928 * @ast_entry: AST entry of the node
929 *
930 * This function gets the next hop from the ast entry.
931 *
932 * Return: (uint8_t) next_hop
933 */
934uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
935 struct dp_ast_entry *ast_entry)
936{
937 return ast_entry->next_hop;
938}
939
/*
 * dp_peer_ast_set_type() - set the entry type of an AST entry
 * @soc: SoC handle (unused here, kept for API symmetry)
 * @ast_entry: AST entry of the node
 * @type: new cdp_txrx_ast_entry_type to record
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
 */
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
	ast_entry->type = type;
}
955
Manjunathappa Prakashc850ec62017-11-13 16:55:50 -0800956#else
/* Stub for builds with AST support compiled out (guarding #ifdef is
 * outside this view — TODO confirm). Reports success-like non-zero
 * without creating any entry.
 */
int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		uint32_t flags)
{
	return 1;
}
963
/* No-op stub for builds with AST support compiled out */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
}
967
/* Stub for builds with AST support compiled out; nothing to update */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	return 1;
}
Manjunathappa Prakashc850ec62017-11-13 16:55:50 -0800973
/* Stub: no AST hash exists in this configuration, lookups never match */
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr)
{
	return NULL;
}
979
/* Stub: no AST hash exists in this configuration, lookups never match */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	return NULL;
}
986
/* Stub: nothing to allocate, always reports success (0) */
static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	return 0;
}
991
/* No-op stub: AST map events are ignored in this configuration */
static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id, uint16_t ast_hash)
{
	return;
}
998
999static void dp_peer_ast_hash_detach(struct dp_soc *soc)
1000{
1001}
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05301002
/* No-op stub for builds with AST support compiled out */
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
}
1008
/* Stub: 0xff flags an invalid/unavailable pdev_id to callers */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
1014
1015
/* Stub: 0xff flags an invalid/unavailable next_hop to callers */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
Manjunathappa Prakashc850ec62017-11-13 16:55:50 -08001021#endif
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301022
/*
 * dp_peer_ast_send_wds_del() - request target-side deletion of an AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry to delete on the target
 *
 * Sends peer_del_wds_entry to the control plane for next-hop entries and
 * marks the entry delete_in_progress so it is not reused or re-sent.
 * NOTE(review): peer_del_wds_entry is called without a NULL check on the
 * ops pointer — presumably guaranteed registered in this build; confirm.
 */
void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = ast_entry->peer;
	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;

	/* Deletion already requested; do not send a duplicate */
	if (ast_entry->delete_in_progress)
		return;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);

	/* Only next-hop (WDS-style) entries exist on the target side */
	if (ast_entry->next_hop) {
		cdp_soc->ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
						    ast_entry->mac_addr.raw,
						    ast_entry->type);
	}

	ast_entry->delete_in_progress = true;
}
1046
/*
 * dp_peer_ast_free_entry() - unlink and free an AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry to tear down
 *
 * Takes the ast lock itself (unlike dp_peer_del_ast), detaches the entry
 * from the ast_table, the peer list and the hash, then invokes the owner
 * callback (outside the lock, to avoid re-entrancy under ast_lock) and
 * frees the memory.
 */
static void dp_peer_ast_free_entry(struct dp_soc *soc,
				   struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = ast_entry->peer;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;

	/*
	 * release the hardware-index reference only if the entry was
	 * actually mapped to ast_table
	 */

	qdf_spin_lock_bh(&soc->ast_lock);
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);

	/* Capture and clear the callback under the lock so it runs once */
	cb = ast_entry->callback;
	cookie = ast_entry->cookie;
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	if (ast_entry == peer->self_ast_entry)
		peer->self_ast_entry = NULL;

	qdf_spin_unlock_bh(&soc->ast_lock);

	/* Notify the registrant after dropping ast_lock */
	if (cb) {
		cb(soc->ctrl_psoc,
		   soc,
		   cookie,
		   CDP_TXRX_AST_DELETED);
	}
	qdf_mem_free(ast_entry);
	/* NOTE(review): decrement happens outside ast_lock — confirm the
	 * counter is only advisory or otherwise protected.
	 */
	soc->num_ast_entries--;
}
Kiran Venkatappaed35f442018-07-19 22:22:29 +05301086
/*
 * dp_peer_find_hash_find() - look up a peer by MAC (and vdev, for qwrap)
 * @soc: SoC handle
 * @peer_mac_addr: MAC address to search for
 * @mac_addr_is_aligned: nonzero if peer_mac_addr is already a
 *                       dp_align_mac_addr-aligned buffer
 * @vdev_id: vdev to match, or DP_VDEV_ALL (used only with ATH_SUPPORT_WRAP)
 *
 * On success the peer's reference count is incremented before the hash
 * lock is dropped; the caller owns that reference and must release it
 * with dp_peer_unref_delete.
 *
 * Return: peer pointer with one extra reference, or NULL if not found
 */
struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_peer *peer;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
	} else {
		/* Copy into an aligned scratch buffer before hashing */
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, DP_MAC_ADDR_LEN);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peers with the same MAC
		 * address; the modified find matches vdev_id as well to
		 * pick the correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
			((peer->vdev->vdev_id == vdev_id) ||
			 (vdev_id == DP_VDEV_ALL))) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it - increment the ref count before releasing
			 * the lock
			 */
			qdf_atomic_inc(&peer->ref_cnt);
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return NULL;	/* failure */
}
1126
1127void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
1128{
1129 unsigned index;
1130 struct dp_peer *tmppeer = NULL;
1131 int found = 0;
1132
1133 index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1134 /* Check if tail is not empty before delete*/
1135 QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
1136 /*
1137 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
1138 * by the caller.
1139 * The caller needs to hold the lock from the time the peer object's
1140 * reference count is decremented and tested up through the time the
1141 * reference to the peer object is removed from the hash table, by
1142 * this function.
1143 * Holding the lock only while removing the peer object reference
1144 * from the hash table keeps the hash table consistent, but does not
1145 * protect against a new HL tx context starting to use the peer object
1146 * if it looks up the peer object from its MAC address just after the
1147 * peer ref count is decremented to zero, but just before the peer
1148 * object reference is removed from the hash table.
1149 */
1150 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
1151 if (tmppeer == peer) {
1152 found = 1;
1153 break;
1154 }
1155 }
1156 QDF_ASSERT(found);
1157 TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
1158}
1159
/*
 * dp_peer_find_hash_erase() - drop every peer still in the hash at teardown
 * @soc: SoC handle; assumed no longer in active use
 *
 * Walks all hash bins and forces each remaining peer's reference count
 * to exactly 1 so dp_peer_unref_delete frees it immediately.
 */
void dp_peer_find_hash_erase(struct dp_soc *soc)
{
	int i;

	/*
	 * Not really necessary to take peer_ref_mutex lock - by this point,
	 * it's known that the soc is no longer in use.
	 */
	for (i = 0; i <= soc->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
			struct dp_peer *peer, *peer_next;

			/*
			 * TAILQ_FOREACH_SAFE must be used here to avoid any
			 * memory access violation after peer is freed
			 */
			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
				hash_list_elem, peer_next) {
				/*
				 * Don't remove the peer from the hash table -
				 * that would modify the list we are currently
				 * traversing, and it's not necessary anyway.
				 */
				/*
				 * Artificially adjust the peer's ref count to
				 * 1, so it will get deleted by
				 * dp_peer_unref_delete.
				 */
				/* set to zero */
				qdf_atomic_init(&peer->ref_cnt);
				/* incr to one */
				qdf_atomic_inc(&peer->ref_cnt);
				dp_peer_unref_delete(peer);
			}
		}
	}
}
1197
/* Free the soc-wide hw-index -> AST entry lookup table */
static void dp_peer_ast_table_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->ast_table);
}
1202
/* Free the peer_id -> peer object lookup map */
static void dp_peer_find_map_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_id_to_obj_map);
}
1207
/*
 * dp_peer_find_attach() - set up all peer/AST lookup structures
 * @soc: SoC handle
 *
 * Attaches, in order: the peer-id map, the peer hash, the AST table
 * and the AST hash. On any failure every structure attached so far is
 * torn down again, in reverse order.
 *
 * Return: 0 on success, 1 on failure
 */
int dp_peer_find_attach(struct dp_soc *soc)
{
	if (dp_peer_find_map_attach(soc))
		goto fail_map;

	if (dp_peer_find_hash_attach(soc))
		goto fail_hash;

	if (dp_peer_ast_table_attach(soc))
		goto fail_ast_table;

	if (dp_peer_ast_hash_attach(soc))
		goto fail_ast_hash;

	return 0; /* success */

fail_ast_hash:
	dp_peer_ast_table_detach(soc);
fail_ast_table:
	dp_peer_find_hash_detach(soc);
fail_hash:
	dp_peer_find_map_detach(soc);
fail_map:
	return 1;
}
1233
/*
 * dp_rx_tid_stats_cb() - REO queue-stats command completion callback
 * @soc: SoC handle
 * @cb_ctxt: callback context; the dp_rx_tid the stats were requested for
 * @reo_status: REO status descriptor carrying hal_reo_queue_status
 *
 * Dumps the per-TID REO queue state (sequence numbers, PN, rx bitmaps,
 * frame/byte counters) plus the host-side BA session counters. Bails out
 * with an error trace if the REO command itself failed.
 */
void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
			queue_status->header.status, rx_tid->tid);
		return;
	}

	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n"
		"ssn: %d\n"
		"curr_idx  : %d\n"
		"pn_31_0   : %08x\n"
		"pn_63_32  : %08x\n"
		"pn_95_64  : %08x\n"
		"pn_127_96 : %08x\n"
		"last_rx_enq_tstamp : %08x\n"
		"last_rx_deq_tstamp : %08x\n"
		"rx_bitmap_31_0     : %08x\n"
		"rx_bitmap_63_32    : %08x\n"
		"rx_bitmap_95_64    : %08x\n"
		"rx_bitmap_127_96   : %08x\n"
		"rx_bitmap_159_128  : %08x\n"
		"rx_bitmap_191_160  : %08x\n"
		"rx_bitmap_223_192  : %08x\n"
		"rx_bitmap_255_224  : %08x\n",
		rx_tid->tid,
		queue_status->ssn, queue_status->curr_idx,
		queue_status->pn_31_0, queue_status->pn_63_32,
		queue_status->pn_95_64, queue_status->pn_127_96,
		queue_status->last_rx_enq_tstamp,
		queue_status->last_rx_deq_tstamp,
		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
		queue_status->rx_bitmap_159_128,
		queue_status->rx_bitmap_191_160,
		queue_status->rx_bitmap_223_192,
		queue_status->rx_bitmap_255_224);

	DP_TRACE_STATS(FATAL,
		"curr_mpdu_cnt      : %d\n"
		"curr_msdu_cnt      : %d\n"
		"fwd_timeout_cnt    : %d\n"
		"fwd_bar_cnt        : %d\n"
		"dup_cnt            : %d\n"
		"frms_in_order_cnt  : %d\n"
		"bar_rcvd_cnt       : %d\n"
		"mpdu_frms_cnt      : %d\n"
		"msdu_frms_cnt      : %d\n"
		"total_byte_cnt     : %d\n"
		"late_recv_mpdu_cnt : %d\n"
		"win_jump_2k        : %d\n"
		"hole_cnt           : %d\n",
		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
		queue_status->msdu_frms_cnt, queue_status->total_cnt,
		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
		queue_status->hole_cnt);

	DP_PRINT_STATS("Addba Req          : %d\n"
			"Addba Resp         : %d\n"
			"Addba Resp success : %d\n"
			"Addba Resp failed  : %d\n"
			"Delba Req received : %d\n"
			"Delba Tx success   : %d\n"
			"Delba Tx Fail      : %d\n"
			"BA window size     : %d\n"
			"Pn size            : %d\n",
			rx_tid->num_of_addba_req,
			rx_tid->num_of_addba_resp,
			rx_tid->num_addba_rsp_success,
			rx_tid->num_addba_rsp_failed,
			rx_tid->num_of_delba_req,
			rx_tid->delba_tx_success_cnt,
			rx_tid->delba_tx_fail_cnt,
			rx_tid->ba_win_size,
			rx_tid->pn_size);
}
1317
/*
 * dp_peer_find_add_id() - bind a firmware-assigned peer_id to a host peer
 * @soc: SoC handle
 * @peer_mac_addr: MAC address reported in the peer map event
 * @peer_id: firmware-assigned peer id
 * @hw_peer_id: ast index (unused here; consumed by the caller)
 * @vdev_id: vdev the peer belongs to
 *
 * Looks the peer up by MAC and records it in peer_id_to_obj_map and in
 * the peer's own id list. The extra reference taken by the hash find is
 * deliberately kept: it represents the map-event's hold on the peer.
 *
 * Return: the peer (with one extra reference), or NULL if no peer with
 * that MAC exists yet
 */
static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	struct dp_peer *peer;

	QDF_ASSERT(peer_id <= soc->max_peers);
	/* check if there's already a peer object with this MAC address */
	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
				      0 /* is aligned */, vdev_id);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
		  __func__, peer, peer_id, vdev_id, peer_mac_addr[0],
		  peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
		  peer_mac_addr[4], peer_mac_addr[5]);

	if (peer) {
		/* peer's ref count was already incremented by
		 * peer_find_hash_find
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: ref_cnt: %d", __func__,
			  qdf_atomic_read(&peer->ref_cnt));
		if (!soc->peer_id_to_obj_map[peer_id])
			soc->peer_id_to_obj_map[peer_id] = peer;
		else {
			/* Peer map event came for peer_id which
			 * is already mapped, this is not expected
			 */
			QDF_ASSERT(0);
		}

		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
			/* TBDXXX: assert for now */
			QDF_ASSERT(0);
		}

		return peer;
	}

	return NULL;
}
1360
1361/**
1362 * dp_rx_peer_map_handler() - handle peer map event from firmware
1363 * @soc_handle - genereic soc handle
1364 * @peeri_id - peer_id from firmware
1365 * @hw_peer_id - ast index for this peer
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301366 * @vdev_id - vdev ID
1367 * @peer_mac_addr - mac address of the peer
1368 * @ast_hash - ast hash value
1369 * @is_wds - flag to indicate peer map event for WDS ast entry
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301370 *
1371 * associate the peer_id that firmware provided with peer entry
1372 * and update the ast table in the host with the hw_peer_id.
1373 *
1374 * Return: none
1375 */
1376
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001377void
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301378dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id,
1379 uint16_t hw_peer_id, uint8_t vdev_id,
1380 uint8_t *peer_mac_addr, uint16_t ast_hash,
1381 uint8_t is_wds)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001382{
1383 struct dp_soc *soc = (struct dp_soc *)soc_handle;
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301384 struct dp_peer *peer = NULL;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301385 enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301386
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301387 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1388 "peer_map_event (soc:%pK): peer_id %di, hw_peer_id %d, peer_mac %02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d",
1389 soc, peer_id,
1390 hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
1391 peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
1392 peer_mac_addr[5], vdev_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001393
Tallapragada Kalyana7023622018-12-03 19:29:52 +05301394 if ((hw_peer_id < 0) ||
1395 (hw_peer_id >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301396 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301397 "invalid hw_peer_id: %d", hw_peer_id);
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +05301398 qdf_assert_always(0);
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301399 }
1400
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301401 /* Peer map event for WDS ast entry get the peer from
1402 * obj map
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05301403 */
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301404 if (is_wds) {
1405 peer = soc->peer_id_to_obj_map[peer_id];
1406 } else {
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301407 peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301408 hw_peer_id, vdev_id);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05301409
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301410 if (peer) {
1411 /*
Lin Bai83fb60c2018-10-16 16:23:36 +08001412 * For every peer Map message search and set if bss_peer
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301413 */
1414 if (!(qdf_mem_cmp(peer->mac_addr.raw,
1415 peer->vdev->mac_addr.raw,
1416 DP_MAC_ADDR_LEN))) {
1417 QDF_TRACE(QDF_MODULE_ID_DP,
1418 QDF_TRACE_LEVEL_INFO_HIGH,
1419 "vdev bss_peer!!!!");
1420 peer->bss_peer = 1;
1421 peer->vdev->vap_bss_peer = peer;
1422 }
1423
1424 if (peer->vdev->opmode == wlan_op_mode_sta)
1425 peer->vdev->bss_ast_hash = ast_hash;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301426
1427 /* Add ast entry incase self ast entry is
1428 * deleted due to DP CP sync issue
1429 *
1430 * self_ast_entry is modified in peer create
1431 * and peer unmap path which cannot run in
1432 * parllel with peer map, no lock need before
1433 * referring it
1434 */
1435 if (!peer->self_ast_entry) {
1436 QDF_TRACE(QDF_MODULE_ID_DP,
1437 QDF_TRACE_LEVEL_INFO_HIGH,
1438 "Add self ast from map %pM",
1439 peer_mac_addr);
1440 dp_peer_add_ast(soc, peer,
1441 peer_mac_addr,
1442 type, 0);
1443 }
1444
sumedh baikady68450ab2018-03-23 18:36:29 -07001445 }
Anish Nataraj0dae6762018-03-02 22:31:45 +05301446 }
1447
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301448 dp_peer_map_ast(soc, peer, peer_mac_addr,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05301449 hw_peer_id, vdev_id, ast_hash);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001450}
1451
/**
 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
 * @soc_handle - generic soc handle
 * @peer_id - peer_id from firmware
 * @vdev_id - vdev ID
 * @mac_addr - mac address of the peer or wds entry
 * @is_wds - flag to indicate peer map event for WDS ast entry
 *
 * Return: none
 */
void
dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id,
			 uint8_t vdev_id, uint8_t *mac_addr,
			 uint8_t is_wds)
{
	struct dp_peer *peer;
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint8_t i;

	peer = __dp_peer_find_by_id(soc, peer_id);

	/*
	 * Currently peer IDs are assigned for vdevs as well as peers.
	 * If the peer ID is for a vdev, then the peer pointer stored
	 * in peer_id_to_obj_map will be NULL.
	 */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Received unmap event for invalid peer_id %u",
			  __func__, peer_id);
		return;
	}

	/* If V2 peer map messages are enabled, the AST entry has to be
	 * freed here (del confirmation arrives via this unmap event)
	 */
	if (soc->is_peer_map_unmap_v2) {

		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_list_find(soc, peer,
						  mac_addr);

		if (!ast_entry) {
			/* in case of qwrap we have multiple BSS peers
			 * with same mac address
			 *
			 * AST entry for this mac address will be created
			 * only for one peer
			 */
			if (peer->vdev->proxysta_vdev) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				goto peer_unmap;
			}

			/* Ideally we should not enter this case where
			 * ast_entry is not present in host table and
			 * we received a unmap event
			 */
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
				  "%s:%d AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u\n",
				  __func__, __LINE__, peer, peer->peer_ids[0],
				  peer->mac_addr.raw, mac_addr, vdev_id,
				  is_wds);

			qdf_spin_unlock_bh(&soc->ast_lock);

			/* Still unmap the peer itself for non-WDS events */
			if (!is_wds)
				goto peer_unmap;

			return;
		}
		qdf_spin_unlock_bh(&soc->ast_lock);

		/* Free the AST entry only if its deletion was already
		 * initiated (delete_in_progress set by the wds-del path);
		 * otherwise keep it so it can be reused
		 */
		if (ast_entry->delete_in_progress)
			dp_peer_ast_free_entry(soc, ast_entry);

		/* A WDS-entry unmap does not unmap the peer itself */
		if (is_wds)
			return;
	}

peer_unmap:
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "peer_unmap_event (soc:%pK) peer_id %d peer %pK",
		  soc, peer_id, peer);

	soc->peer_id_to_obj_map[peer_id] = NULL;
	/* Invalidate this id in the peer's own id list */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == peer_id) {
			peer->peer_ids[i] = HTT_INVALID_PEER;
			break;
		}
	}

	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
						      peer_id, vdev_id);
	}

	/*
	 * Remove a reference to the peer.
	 * If there are no more references, delete the peer object.
	 */
	dp_peer_unref_delete(peer);
}
1559
/*
 * dp_peer_find_detach() - Tear down all peer lookup structures of a SoC
 * @soc: Datapath SoC handle
 *
 * Frees the peer-id-to-object map, the peer hash table, the AST hash
 * table and the AST table. NOTE(review): this ordering presumably
 * mirrors the corresponding attach sequence in reverse — confirm
 * against dp_peer_find_attach before reordering.
 */
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_ast_table_detach(soc);
}
1568
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001569static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1570 union hal_reo_status *reo_status)
1571{
1572 struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001573
Karunakar Dasineni31b98d42018-02-27 23:05:08 -08001574 if ((reo_status->rx_queue_status.header.status !=
1575 HAL_REO_CMD_SUCCESS) &&
1576 (reo_status->rx_queue_status.header.status !=
1577 HAL_REO_CMD_DRAIN)) {
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001578 /* Should not happen normally. Just print error for now */
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301579 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1580 "%s: Rx tid HW desc update failed(%d): tid %d",
1581 __func__,
1582 reo_status->rx_queue_status.header.status,
1583 rx_tid->tid);
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001584 }
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001585}
1586
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001587/*
Leo Chang5ea93a42016-11-03 12:39:49 -07001588 * dp_find_peer_by_addr - find peer instance by mac address
1589 * @dev: physical device instance
1590 * @peer_mac_addr: peer mac address
1591 * @local_id: local id for the peer
1592 *
1593 * Return: peer instance pointer
1594 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001595void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
Leo Chang5ea93a42016-11-03 12:39:49 -07001596 uint8_t *local_id)
1597{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001598 struct dp_pdev *pdev = (struct dp_pdev *)dev;
Leo Chang5ea93a42016-11-03 12:39:49 -07001599 struct dp_peer *peer;
1600
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05301601 peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05301602
Leo Chang5ea93a42016-11-03 12:39:49 -07001603 if (!peer)
1604 return NULL;
1605
1606 /* Multiple peer ids? How can know peer id? */
1607 *local_id = peer->local_id;
Krunal Sonic96a1162019-02-21 11:33:26 -08001608 dp_verbose_debug("peer %pK id %d", peer, *local_id);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08001609
1610 /* ref_cnt is incremented inside dp_peer_find_hash_find().
1611 * Decrement it here.
1612 */
Chaithanya Garrepalli7c8cf122018-09-07 19:23:52 +05301613 dp_peer_unref_delete(peer);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08001614
Leo Chang5ea93a42016-11-03 12:39:49 -07001615 return peer;
1616}
1617
/*
 * dp_rx_tid_update_wifi3() – Update receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
	ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;

	qdf_mem_zero(&params, sizeof(params));

	/* Address the REO command at this TID's existing HW queue desc */
	params.std.need_status = 1;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_ba_window_size = 1;
	params.u.upd_queue_params.ba_window_size = ba_window_size;

	/* Only program the SSN when the caller supplied a valid one */
	if (start_seq < IEEE80211_SEQ_MAX) {
		params.u.upd_queue_params.update_ssn = 1;
		params.u.upd_queue_params.ssn = start_seq;
	}

	dp_set_ssn_valid_flag(&params, 0);

	/* NOTE(review): dp_reo_send_cmd()'s return value is ignored, so a
	 * send failure is only visible via dp_rx_tid_update_cb logging.
	 */
	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, dp_rx_tid_update_cb, rx_tid);

	rx_tid->ba_win_size = ba_window_size;
	/* Inform the control path of the new reorder-queue parameters */
	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
			peer->vdev->pdev->ctrl_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);

	}
	return 0;
}
1661
1662/*
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001663 * dp_reo_desc_free() - Callback free reo descriptor memory after
1664 * HW cache flush
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001665 *
1666 * @soc: DP SOC handle
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001667 * @cb_ctxt: Callback context
1668 * @reo_status: REO command status
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001669 */
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001670static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1671 union hal_reo_status *reo_status)
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001672{
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001673 struct reo_desc_list_node *freedesc =
1674 (struct reo_desc_list_node *)cb_ctxt;
1675 struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001676
Karunakar Dasineni31b98d42018-02-27 23:05:08 -08001677 if ((reo_status->fl_cache_status.header.status !=
1678 HAL_REO_CMD_SUCCESS) &&
1679 (reo_status->fl_cache_status.header.status !=
1680 HAL_REO_CMD_DRAIN)) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301681 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1682 "%s: Rx tid HW desc flush failed(%d): tid %d",
1683 __func__,
1684 reo_status->rx_queue_status.header.status,
1685 freedesc->rx_tid.tid);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001686 }
chenguo8df4d462018-12-19 16:33:14 +08001687 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1688 "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
1689 (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001690 qdf_mem_unmap_nbytes_single(soc->osdev,
1691 rx_tid->hw_qdesc_paddr,
1692 QDF_DMA_BIDIRECTIONAL,
1693 rx_tid->hw_qdesc_alloc_size);
1694 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1695 qdf_mem_free(freedesc);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001696}
1697
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001698#if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1699/* Hawkeye emulation requires bus address to be >= 0x50000000 */
1700static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1701{
1702 if (dma_addr < 0x50000000)
1703 return QDF_STATUS_E_FAILURE;
1704 else
1705 return QDF_STATUS_SUCCESS;
1706}
1707#else
1708static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1709{
1710 return QDF_STATUS_SUCCESS;
1711}
1712#endif
1713
1714
/*
 * dp_rx_tid_setup_wifi3() – Setup receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Allocates and DMA-maps the REO hardware queue descriptor for this
 * TID (retrying up to 10 times if the bus address fails the platform
 * check), programs it via hal_reo_qdesc_setup(), and registers it
 * with the control path. If a descriptor already exists the call is
 * forwarded to dp_rx_tid_update_wifi3() instead.
 *
 * Return: 0 on success, error code on failure
 */
int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
			uint32_t ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint32_t hw_qdesc_size;
	uint32_t hw_qdesc_align;
	int hal_pn_type;
	void *hw_qdesc_vaddr;
	uint32_t alloc_tries = 0;
	int err = QDF_STATUS_SUCCESS;

	/* Bail out if the peer is being torn down or routing not ready */
	if (peer->delete_in_progress ||
	    !qdf_atomic_read(&peer->is_default_route_set))
		return QDF_STATUS_E_FAILURE;

	rx_tid->ba_win_size = ba_window_size;
	/* Descriptor already exists: update it instead of reallocating */
	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
			start_seq);
	/* Fresh descriptor: reset all per-TID BA/DELBA bookkeeping */
	rx_tid->delba_tx_status = 0;
	rx_tid->ppdu_id_2k = 0;
	rx_tid->num_of_addba_req = 0;
	rx_tid->num_of_delba_req = 0;
	rx_tid->num_of_addba_resp = 0;
	rx_tid->num_addba_rsp_failed = 0;
	rx_tid->num_addba_rsp_success = 0;
	rx_tid->delba_tx_success_cnt = 0;
	rx_tid->delba_tx_fail_cnt = 0;
	rx_tid->statuscode = 0;

	/* TODO: Allocating HW queue descriptors based on max BA window size
	 * for all QOS TIDs so that same descriptor can be used later when
	 * ADDBA request is recevied. This should be changed to allocate HW
	 * queue descriptors based on BA window size being negotiated (0 for
	 * non BA cases), and reallocate when BA window size changes and also
	 * send WMI message to FW to change the REO queue descriptor in Rx
	 * peer entry as part of dp_rx_tid_update.
	 */
	if (tid != DP_NON_QOS_TID)
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			HAL_RX_MAX_BA_WINDOW, tid);
	else
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			ba_window_size, tid);

	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
	/* To avoid unnecessary extra allocation for alignment, try allocating
	 * exact size and see if we already have aligned address.
	 */
	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;

try_desc_alloc:
	rx_tid->hw_qdesc_vaddr_unaligned =
		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);

	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc alloc failed: tid %d",
			__func__, tid);
		return QDF_STATUS_E_NOMEM;
	}

	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
		hw_qdesc_align) {
		/* Address allocated above is not alinged. Allocate extra
		 * memory for alignment
		 */
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned =
			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
					hw_qdesc_align - 1);

		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Rx tid HW desc alloc failed: tid %d",
				__func__, tid);
			return QDF_STATUS_E_NOMEM;
		}

		/* Round the usable address up to the required alignment */
		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
			rx_tid->hw_qdesc_vaddr_unaligned,
			hw_qdesc_align);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Total Size %d Aligned Addr %pK",
			__func__, rx_tid->hw_qdesc_alloc_size,
			hw_qdesc_vaddr);

	} else {
		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
	}

	/* TODO: Ensure that sec_type is set before ADDBA is received.
	 * Currently this is set based on htt indication
	 * HTT_T2H_MSG_TYPE_SEC_IND from target
	 */
	switch (peer->security[dp_sec_ucast].sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		hal_pn_type = HAL_PN_WPA;
		break;
	case cdp_sec_type_wapi:
		if (vdev->opmode == wlan_op_mode_ap)
			hal_pn_type = HAL_PN_WAPI_EVEN;
		else
			hal_pn_type = HAL_PN_WAPI_UNEVEN;
		break;
	default:
		hal_pn_type = HAL_PN_NONE;
		break;
	}

	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);

	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
		&(rx_tid->hw_qdesc_paddr));

	/* Some platforms reject certain bus-address ranges (see
	 * dp_reo_desc_addr_chk); retry the allocation a bounded number
	 * of times hoping for an acceptable address.
	 */
	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
			QDF_STATUS_SUCCESS) {
		if (alloc_tries++ < 10) {
			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
			goto try_desc_alloc;
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Rx tid HW desc alloc failed (lowmem): tid %d",
				__func__, tid);
			err = QDF_STATUS_E_NOMEM;
			goto error;
		}
	}

	/* Register the new queue descriptor with the control path / FW */
	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
		    vdev->pdev->ctrl_pdev, peer->vdev->vdev_id,
		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
		    1, ba_window_size)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Failed to send reo queue setup to FW - tid %d\n",
				__func__, tid);
			err = QDF_STATUS_E_FAILURE;
			goto error;
		}
	}
	return 0;
error:
	/* Unwind: unmap (only if the address check passed) and free */
	if (NULL != rx_tid->hw_qdesc_vaddr_unaligned) {
		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
		    QDF_STATUS_SUCCESS)
			qdf_mem_unmap_nbytes_single(
				soc->osdev,
				rx_tid->hw_qdesc_paddr,
				QDF_DMA_BIDIRECTIONAL,
				rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
	}
	return err;
}
1890
1891/*
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001892 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
1893 * after deleting the entries (ie., setting valid=0)
1894 *
1895 * @soc: DP SOC handle
1896 * @cb_ctxt: Callback context
1897 * @reo_status: REO command status
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001898 */
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001899static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
1900 union hal_reo_status *reo_status)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001901{
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001902 struct reo_desc_list_node *freedesc =
1903 (struct reo_desc_list_node *)cb_ctxt;
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001904 uint32_t list_size;
1905 struct reo_desc_list_node *desc;
1906 unsigned long curr_ts = qdf_get_system_timestamp();
1907 uint32_t desc_size, tot_desc_size;
1908 struct hal_reo_cmd_params params;
1909
Karunakar Dasineni31b98d42018-02-27 23:05:08 -08001910 if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
1911 qdf_mem_zero(reo_status, sizeof(*reo_status));
1912 reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
1913 dp_reo_desc_free(soc, (void *)freedesc, reo_status);
1914 return;
1915 } else if (reo_status->rx_queue_status.header.status !=
1916 HAL_REO_CMD_SUCCESS) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001917 /* Should not happen normally. Just print error for now */
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301918 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1919 "%s: Rx tid HW desc deletion failed(%d): tid %d",
1920 __func__,
1921 reo_status->rx_queue_status.header.status,
1922 freedesc->rx_tid.tid);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001923 }
1924
Houston Hoffman41b912c2017-08-30 14:27:51 -07001925 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
Aditya Sathishded018e2018-07-02 16:25:21 +05301926 "%s: rx_tid: %d status: %d", __func__,
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001927 freedesc->rx_tid.tid,
1928 reo_status->rx_queue_status.header.status);
Krishna Kumaar Natarajan1741dc42017-01-26 19:24:48 -08001929
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001930 qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1931 freedesc->free_ts = curr_ts;
1932 qdf_list_insert_back_size(&soc->reo_desc_freelist,
1933 (qdf_list_node_t *)freedesc, &list_size);
1934
1935 while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1936 (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1937 ((list_size >= REO_DESC_FREELIST_SIZE) ||
1938 ((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
1939 struct dp_rx_tid *rx_tid;
1940
1941 qdf_list_remove_front(&soc->reo_desc_freelist,
1942 (qdf_list_node_t **)&desc);
1943 list_size--;
1944 rx_tid = &desc->rx_tid;
1945
1946 /* Flush and invalidate REO descriptor from HW cache: Base and
1947 * extension descriptors should be flushed separately */
Karunakar Dasineni26ebbe42018-05-31 07:59:10 -07001948 tot_desc_size = rx_tid->hw_qdesc_alloc_size;
1949 /* Get base descriptor size by passing non-qos TID */
1950 desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
1951 DP_NON_QOS_TID);
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001952
1953 /* Flush reo extension descriptors */
1954 while ((tot_desc_size -= desc_size) > 0) {
1955 qdf_mem_zero(&params, sizeof(params));
1956 params.std.addr_lo =
1957 ((uint64_t)(rx_tid->hw_qdesc_paddr) +
1958 tot_desc_size) & 0xffffffff;
1959 params.std.addr_hi =
1960 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1961
1962 if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1963 CMD_FLUSH_CACHE,
1964 &params,
1965 NULL,
1966 NULL)) {
1967 QDF_TRACE(QDF_MODULE_ID_DP,
1968 QDF_TRACE_LEVEL_ERROR,
1969 "%s: fail to send CMD_CACHE_FLUSH:"
Aditya Sathishded018e2018-07-02 16:25:21 +05301970 "tid %d desc %pK", __func__,
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001971 rx_tid->tid,
1972 (void *)(rx_tid->hw_qdesc_paddr));
1973 }
1974 }
1975
1976 /* Flush base descriptor */
1977 qdf_mem_zero(&params, sizeof(params));
1978 params.std.need_status = 1;
1979 params.std.addr_lo =
1980 (uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1981 params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1982
1983 if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1984 CMD_FLUSH_CACHE,
1985 &params,
1986 dp_reo_desc_free,
1987 (void *)desc)) {
1988 union hal_reo_status reo_status;
1989 /*
1990 * If dp_reo_send_cmd return failure, related TID queue desc
1991 * should be unmapped. Also locally reo_desc, together with
1992 * TID queue desc also need to be freed accordingly.
1993 *
1994 * Here invoke desc_free function directly to do clean up.
1995 */
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05301996 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1997 "%s: fail to send REO cmd to flush cache: tid %d",
1998 __func__, rx_tid->tid);
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001999 qdf_mem_zero(&reo_status, sizeof(reo_status));
2000 reo_status.fl_cache_status.header.status = 0;
2001 dp_reo_desc_free(soc, (void *)desc, &reo_status);
2002 }
2003 }
2004 qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002005}
2006
/*
 * dp_rx_tid_delete_wifi3() – Delete receive TID queue
 * @peer: Datapath peer handle
 * @tid: TID
 *
 * Snapshots the TID's queue-descriptor bookkeeping into a freelist
 * node, issues a REO update that clears the queue's valid bit (with
 * dp_rx_tid_delete_cb as completion, which owns freeing the node),
 * and clears the per-TID descriptor fields.
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
{
	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	struct reo_desc_list_node *freedesc =
		qdf_mem_malloc(sizeof(*freedesc));

	if (!freedesc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: malloc failed for freedesc: tid %d",
			__func__, tid);
		return -ENOMEM;
	}

	/* Struct copy: freedesc carries the descriptor info from here on */
	freedesc->rx_tid = *rx_tid;

	qdf_mem_zero(&params, sizeof(params));

	/* Invalidate the queue: update_vld=1 with vld=0 */
	params.std.need_status = 1;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_vld = 1;
	params.u.upd_queue_params.vld = 0;

	/* NOTE(review): the return value of dp_reo_send_cmd is not
	 * checked; if the command cannot be queued, freedesc and the HW
	 * queue descriptor it references are leaked (the callback that
	 * frees them never runs). Worth confirming/fixing upstream.
	 */
	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
		dp_rx_tid_delete_cb, (void *)freedesc);

	/* Ownership moved to freedesc/callback; drop local references */
	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
	rx_tid->hw_qdesc_alloc_size = 0;
	rx_tid->hw_qdesc_paddr = 0;

	return 0;
}
2048
#ifdef DP_LFR
/*
 * dp_peer_setup_remaining_tids() - Setup rx reorder queues for the
 * remaining TIDs (1 .. DP_MAX_TIDS-2) of a peer, to handle LFR.
 * @peer: Datapath peer
 *
 * TID 0 and the non-QOS TID are set up separately in dp_peer_rx_init();
 * this pre-creates the rest so they exist before first use.
 */
static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
{
	int tid;

	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"Setting up TID %d for peer %pK peer->local_id %d",
			tid, peer, peer->local_id);
	}
}
#else
/* No-op when LFR support is compiled out.
 * Fix: dropped the stray ';' after the empty body — an extra
 * file-scope semicolon is not valid ISO C and triggers -Wpedantic.
 */
static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
#endif
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002064/*
2065 * dp_peer_rx_init() – Initialize receive TID state
2066 * @pdev: Datapath pdev
2067 * @peer: Datapath peer
2068 *
2069 */
2070void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
2071{
2072 int tid;
2073 struct dp_rx_tid *rx_tid;
2074 for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2075 rx_tid = &peer->rx_tid[tid];
2076 rx_tid->array = &rx_tid->base;
2077 rx_tid->base.head = rx_tid->base.tail = NULL;
2078 rx_tid->tid = tid;
2079 rx_tid->defrag_timeout_ms = 0;
2080 rx_tid->ba_win_size = 0;
2081 rx_tid->ba_status = DP_RX_BA_INACTIVE;
2082
2083 rx_tid->defrag_waitlist_elem.tqe_next = NULL;
2084 rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002085 }
2086
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002087 peer->active_ba_session_cnt = 0;
2088 peer->hw_buffer_size = 0;
2089 peer->kill_256_sessions = 0;
2090
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002091 /* Setup default (non-qos) rx tid queue */
2092 dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
Karunakar Dasinenied1de122016-08-02 11:57:59 -07002093
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08002094 /* Setup rx tid queue for TID 0.
2095 * Other queues will be setup on receiving first packet, which will cause
2096 * NULL REO queue error
2097 */
2098 dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
2099
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002100 /*
Pramod Simhab17d0672017-03-06 17:20:13 -08002101 * Setup the rest of TID's to handle LFR
2102 */
2103 dp_peer_setup_remaining_tids(peer);
2104
2105 /*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002106 * Set security defaults: no PN check, no security. The target may
2107 * send a HTT SEC_IND message to overwrite these defaults.
2108 */
2109 peer->security[dp_sec_ucast].sec_type =
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05302110 peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002111}
2112
/*
 * dp_peer_rx_cleanup() – Cleanup receive TID state
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 * For each TID (under its tid_lock): removes defrag state for
 * non-BSS peers and deletes any allocated REO queue descriptor.
 * Finally destroys every per-TID lock.
 */
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	int tid;
	uint32_t tid_delete_mask = 0;

	DP_TRACE(INFO_HIGH, FL("Remove tids for peer: %pK"), peer);
	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (!peer->bss_peer) {
			/* Cleanup defrag related resource */
			dp_rx_defrag_waitlist_remove(peer, tid);
			dp_rx_reorder_flush_frag(peer, tid);
		}

		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
			dp_rx_tid_delete_wifi3(peer, tid);

			/* Track deleted TIDs for the (disabled) FW call */
			tid_delete_mask |= (1 << tid);
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
/* NOTE(review): dead code — also references 'soc' which is not declared
 * in this function; would not compile if the #ifdef were enabled as-is.
 */
#ifdef notyet /* See if FW can remove queues as part of peer cleanup */
	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			tid_delete_mask);
	}
#endif
	/* All TID work is done; tear down the per-TID locks */
	for (tid = 0; tid < DP_MAX_TIDS; tid++)
		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
}
2152
2153/*
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08002154 * dp_peer_cleanup() – Cleanup peer information
2155 * @vdev: Datapath vdev
2156 * @peer: Datapath peer
2157 *
2158 */
2159void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
2160{
2161 peer->last_assoc_rcvd = 0;
2162 peer->last_disassoc_rcvd = 0;
2163 peer->last_deauth_rcvd = 0;
2164
2165 /* cleanup the Rx reorder queues for this peer */
2166 dp_peer_rx_cleanup(vdev, peer);
2167}
2168
/* dp_teardown_256_ba_session() - Teardown sessions using 256
 *				  window size when a request with
 *				  64 window size is received.
 *				  This is done as a WAR since HW can
 *				  have only one setting per peer (64 or 256).
 *				  For HKv2, we use per tid buffersize setting
 *				  for 0 to per_tid_basize_max_tid. For tid
 *				  more than per_tid_basize_max_tid we use HKv1
 *				  method.
 * @peer: Datapath peer
 *
 * Return: void
 */
static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
{
	uint8_t delba_rcode = 0;
	int tid;
	struct dp_rx_tid *rx_tid = NULL;

	/* TIDs below per_tid_basize_max_tid have per-TID buffer-size
	 * support and need no teardown; start above that threshold.
	 */
	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
	for (; tid < DP_MAX_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		qdf_spin_lock_bh(&rx_tid->tid_lock);

		if (rx_tid->ba_win_size <= 64) {
			/* Window already small enough; nothing to do */
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			continue;
		} else {
			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
				/* send delba */
				if (!rx_tid->delba_tx_status) {
					rx_tid->delba_tx_retry++;
					rx_tid->delba_tx_status = 1;
					rx_tid->delba_rcode =
					IEEE80211_REASON_QOS_SETUP_REQUIRED;
					/* Copy rcode before dropping the
					 * lock: the callout below must not
					 * run with tid_lock held.
					 */
					delba_rcode = rx_tid->delba_rcode;

					qdf_spin_unlock_bh(&rx_tid->tid_lock);
					peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
						peer->vdev->pdev->ctrl_pdev,
						peer->ctrl_peer,
						peer->mac_addr.raw,
						tid, peer->vdev->ctrl_vdev,
						delba_rcode);
				} else {
					/* DELBA already pending for this TID */
					qdf_spin_unlock_bh(&rx_tid->tid_lock);
				}
			} else {
				qdf_spin_unlock_bh(&rx_tid->tid_lock);
			}
		}
	}
}
2223
/*
* dp_addba_resp_tx_completion_wifi3() – Update Rx Tid State
*
* Tx-completion handler for a transmitted ADDBA response frame.
* On success, activates the BA session for the TID and latches the
* peer's hardware reorder-buffer size on the first active session.
*
* @peer_handle: Datapath peer handle
* @tid: TID number
* @status: tx completion status (0 = frame sent successfully)
* Return: 0 on success, error code on failure
*/
int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
				      uint8_t tid, int status)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	/* Reject stale handles: peer may already be queued for deletion */
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		/* ADDBA response never made it out: tear the queue back to
		 * window size 1 and mark the session inactive.
		 */
		rx_tid->num_addba_rsp_failed++;
		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx Tid- %d addba rsp tx completion failed!",
			 __func__, tid);
		/* NOTE: intentionally SUCCESS — the completion itself was
		 * handled even though the session setup failed.
		 */
		return QDF_STATUS_SUCCESS;
	}

	rx_tid->num_addba_rsp_success++;
	/* Session must have been moved to IN_PROGRESS by the ADDBA request
	 * path before this completion fires.
	 */
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
			__func__, tid);
		return QDF_STATUS_E_FAILURE;
	}

	if (!qdf_atomic_read(&peer->is_default_route_set)) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: default route is not set for peer: %pM",
			  __func__, peer->mac_addr.raw);
		return QDF_STATUS_E_FAILURE;
	}

	/* First Session: latch the peer-wide HW buffer size (64 or 256)
	 * from the negotiated BA window of this first session.
	 */
	if (peer->active_ba_session_cnt == 0) {
		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
			peer->hw_buffer_size = 256;
		else
			peer->hw_buffer_size = 64;
	}

	rx_tid->ba_status = DP_RX_BA_ACTIVE;

	peer->active_ba_session_cnt++;

	qdf_spin_unlock_bh(&rx_tid->tid_lock);

	/* Kill any session having 256 buffer size
	 * when 64 buffer size request is received.
	 * Also, latch on to 64 as new buffer size.
	 */
	if (peer->kill_256_sessions) {
		dp_teardown_256_ba_sessions(peer);
		peer->kill_256_sessions = 0;
	}
	return QDF_STATUS_SUCCESS;
}
2297
2298/*
2299* dp_rx_addba_responsesetup_wifi3() – Process ADDBA request from peer
2300*
2301* @peer: Datapath peer handle
2302* @tid: TID number
2303* @dialogtoken: output dialogtoken
2304* @statuscode: output dialogtoken
Jeff Johnsonff2dfb22018-05-12 10:27:57 -07002305* @buffersize: Output BA window size
2306* @batimeout: Output BA timeout
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002307*/
2308void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
2309 uint8_t *dialogtoken, uint16_t *statuscode,
Karunakar Dasinenied1de122016-08-02 11:57:59 -07002310 uint16_t *buffersize, uint16_t *batimeout)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002311{
2312 struct dp_peer *peer = (struct dp_peer *)peer_handle;
sumedh baikadydf4a57c2018-04-08 22:19:22 -07002313 struct dp_rx_tid *rx_tid = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002314
sumedh baikadydf4a57c2018-04-08 22:19:22 -07002315 if (!peer || peer->delete_in_progress) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05302316 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
sumedh baikadydf4a57c2018-04-08 22:19:22 -07002317 "%s: Peer is NULL!\n", __func__);
2318 return;
2319 }
2320 rx_tid = &peer->rx_tid[tid];
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002321 qdf_spin_lock_bh(&rx_tid->tid_lock);
sumedh baikadye3947bd2017-11-29 19:19:25 -08002322 rx_tid->num_of_addba_resp++;
Jeff Johnson97a1cc52018-05-06 15:28:56 -07002323 /* setup ADDBA response parameters */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002324 *dialogtoken = rx_tid->dialogtoken;
2325 *statuscode = rx_tid->statuscode;
Karunakar Dasinenied1de122016-08-02 11:57:59 -07002326 *buffersize = rx_tid->ba_win_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002327 *batimeout = 0;
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002328 qdf_spin_unlock_bh(&rx_tid->tid_lock);
2329}
2330
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002331/* dp_check_ba_buffersize() - Check buffer size in request
2332 * and latch onto this size based on
2333 * size used in first active session.
2334 * @peer: Datapath peer
2335 * @tid: Tid
2336 * @buffersize: Block ack window size
2337 *
2338 * Return: void
2339 */
2340static void dp_check_ba_buffersize(struct dp_peer *peer,
2341 uint16_t tid,
2342 uint16_t buffersize)
2343{
2344 struct dp_rx_tid *rx_tid = NULL;
2345
2346 rx_tid = &peer->rx_tid[tid];
sumedh baikady61cbe852018-10-09 11:04:34 -07002347 if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
2348 tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002349 rx_tid->ba_win_size = buffersize;
sumedh baikady61cbe852018-10-09 11:04:34 -07002350 return;
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002351 } else {
sumedh baikady61cbe852018-10-09 11:04:34 -07002352 if (peer->active_ba_session_cnt == 0) {
2353 rx_tid->ba_win_size = buffersize;
2354 } else {
2355 if (peer->hw_buffer_size == 64) {
2356 if (buffersize <= 64)
2357 rx_tid->ba_win_size = buffersize;
2358 else
2359 rx_tid->ba_win_size = peer->hw_buffer_size;
2360 } else if (peer->hw_buffer_size == 256) {
2361 if (buffersize > 64) {
2362 rx_tid->ba_win_size = buffersize;
2363 } else {
2364 rx_tid->ba_win_size = buffersize;
2365 peer->hw_buffer_size = 64;
2366 peer->kill_256_sessions = 1;
2367 }
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002368 }
2369 }
2370 }
2371}
2372
/*
 * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
 *
 * @peer: Datapath peer handle
 * @dialogtoken: dialogtoken from ADDBA frame
 * @tid: TID number
 * @batimeout: BA timeout
 * @buffersize: BA window size
 * @startseqnum: Start seq. number received in BA sequence control
 *
 * Return: 0 on success, error code on failure
 */
int dp_addba_requestprocess_wifi3(void *peer_handle,
				  uint8_t dialogtoken,
				  uint16_t tid, uint16_t batimeout,
				  uint16_t buffersize,
				  uint16_t startseqnum)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	/* Reject stale handles: peer may already be queued for deletion */
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_req++;
	/* A repeated ADDBA while a session is ACTIVE with an allocated HW
	 * queue descriptor tears the session down; the peer is expected to
	 * retry, at which point a fresh session is set up.
	 */
	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
	     rx_tid->hw_qdesc_vaddr_unaligned)) {
		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		peer->active_ba_session_cnt--;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx Tid- %d hw qdesc is already setup",
			__func__, tid);
		return QDF_STATUS_E_FAILURE;
	}

	/* Setup already underway from a previous request: ignore this one */
	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		return QDF_STATUS_E_FAILURE;
	}
	/* Resolve the effective BA window (may clamp/downgrade the peer) */
	dp_check_ba_buffersize(peer, tid, buffersize);

	if (dp_rx_tid_setup_wifi3(peer, tid,
	    rx_tid->ba_win_size, startseqnum)) {
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;

	rx_tid->dialogtoken = dialogtoken;
	rx_tid->startseqnum = startseqnum;

	/* A user-supplied status code (dp_set_addba_response) overrides
	 * the default SUCCESS in the eventual ADDBA response.
	 */
	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
		rx_tid->statuscode = rx_tid->userstatuscode;
	else
		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;

	qdf_spin_unlock_bh(&rx_tid->tid_lock);

	return QDF_STATUS_SUCCESS;
}
2440
2441/*
Gyanranjan Hazarika99a58d32017-12-22 21:56:17 -08002442* dp_set_addba_response() – Set a user defined ADDBA response status code
2443*
2444* @peer: Datapath peer handle
2445* @tid: TID number
2446* @statuscode: response status code to be set
2447*/
2448void dp_set_addba_response(void *peer_handle, uint8_t tid,
2449 uint16_t statuscode)
2450{
2451 struct dp_peer *peer = (struct dp_peer *)peer_handle;
2452 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2453
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002454 qdf_spin_lock_bh(&rx_tid->tid_lock);
Gyanranjan Hazarika99a58d32017-12-22 21:56:17 -08002455 rx_tid->userstatuscode = statuscode;
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002456 qdf_spin_unlock_bh(&rx_tid->tid_lock);
Gyanranjan Hazarika99a58d32017-12-22 21:56:17 -08002457}
2458
2459/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002460* dp_rx_delba_process_wifi3() – Process DELBA from peer
2461* @peer: Datapath peer handle
Karunakar Dasinenied1de122016-08-02 11:57:59 -07002462* @tid: TID number
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002463* @reasoncode: Reason code received in DELBA frame
2464*
2465* Return: 0 on success, error code on failure
2466*/
2467int dp_delba_process_wifi3(void *peer_handle,
Karunakar Dasinenied1de122016-08-02 11:57:59 -07002468 int tid, uint16_t reasoncode)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002469{
2470 struct dp_peer *peer = (struct dp_peer *)peer_handle;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002471 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2472
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002473 qdf_spin_lock_bh(&rx_tid->tid_lock);
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002474 if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
2475 rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002476 qdf_spin_unlock_bh(&rx_tid->tid_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002477 return QDF_STATUS_E_FAILURE;
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002478 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002479 /* TODO: See if we can delete the existing REO queue descriptor and
2480 * replace with a new one without queue extenstion descript to save
2481 * memory
2482 */
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002483 rx_tid->delba_rcode = reasoncode;
sumedh baikadye3947bd2017-11-29 19:19:25 -08002484 rx_tid->num_of_delba_req++;
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08002485 dp_rx_tid_update_wifi3(peer, tid, 1, 0);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002486
2487 rx_tid->ba_status = DP_RX_BA_INACTIVE;
sumedh baikadyfaadbb62018-08-21 21:13:42 -07002488 peer->active_ba_session_cnt--;
Sumedh Baikady1c61e062018-02-12 22:25:47 -08002489 qdf_spin_unlock_bh(&rx_tid->tid_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002490 return 0;
2491}
2492
/*
 * dp_delba_tx_completion_wifi3() – Handle DELBA tx completion
 *
 * On failed transmission the DELBA is resent (up to DP_MAX_DELBA_RETRY
 * attempts). On success the retry state is cleared and any still-active
 * or in-progress BA session for the TID is torn down.
 *
 * @peer: Datapath peer handle
 * @tid: TID number
 * @status: tx completion status (0 = frame sent successfully)
 * Return: 0 on success, error code on failure
 */

int dp_delba_tx_completion_wifi3(void *peer_handle,
	uint8_t tid, int status)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = NULL;

	/* Reject stale handles: peer may already be queued for deletion */
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		rx_tid->delba_tx_fail_cnt++;
		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
			/* Retry budget exhausted: give up on this DELBA */
			rx_tid->delba_tx_retry = 0;
			rx_tid->delba_tx_status = 0;
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
		} else {
			/* Resend the DELBA with the recorded reason code;
			 * the lock must be dropped before calling out into
			 * the control-path send_delba op.
			 */
			rx_tid->delba_tx_retry++;
			rx_tid->delba_tx_status = 1;
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
				peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
				peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev,
				rx_tid->delba_rcode);
		}
		return QDF_STATUS_SUCCESS;
	} else {
		/* DELBA went out: clear retry bookkeeping */
		rx_tid->delba_tx_success_cnt++;
		rx_tid->delba_tx_retry = 0;
		rx_tid->delba_tx_status = 0;
	}
	/* Tear down whatever session state remains for this TID */
	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		peer->active_ba_session_cnt--;
	}
	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
	}
	qdf_spin_unlock_bh(&rx_tid->tid_lock);

	return QDF_STATUS_SUCCESS;
}
2549
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002550void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
2551 qdf_nbuf_t msdu_list)
2552{
2553 while (msdu_list) {
2554 qdf_nbuf_t msdu = msdu_list;
2555
2556 msdu_list = qdf_nbuf_next(msdu_list);
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05302557 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2558 "discard rx %pK from partly-deleted peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
2559 msdu, peer,
2560 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2561 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2562 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002563 qdf_nbuf_free(msdu);
2564 }
2565}
2566
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05302567
/**
 * dp_set_pn_check_wifi3() - enable PN check in REO for security
 * @vdev_handle: Datapath vdev
 * @peer_handle: Datapath peer handle
 * @sec_type: security type (selects PN size and even/uneven policy)
 * @rx_pn: Receive pn starting number (array of four 32-bit words,
 *         least-significant word first)
 *
 * Builds a REO UPDATE_RX_REO_QUEUE command carrying the PN parameters
 * and issues it for every TID that has a HW queue descriptor set up.
 */

void
dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	int i;
	uint8_t pn_size;
	struct hal_reo_cmd_params params;

	/* preconditions */
	qdf_assert(vdev);

	pdev = vdev->pdev;
	soc = pdev->soc;


	qdf_mem_zero(&params, sizeof(params));

	/* Common REO queue-update flags shared by every TID below */
	params.std.need_status = 1;
	params.u.upd_queue_params.update_pn_valid = 1;
	params.u.upd_queue_params.update_pn_size = 1;
	params.u.upd_queue_params.update_pn = 1;
	params.u.upd_queue_params.update_pn_check_needed = 1;
	params.u.upd_queue_params.update_svld = 1;
	params.u.upd_queue_params.svld = 0;

	peer->security[dp_sec_ucast].sec_type = sec_type;

	/* Pick PN width and policy from the cipher type */
	switch (sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 48;
		pn_size = 48;
		break;
	case cdp_sec_type_wapi:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 128;
		pn_size = 128;
		/* WAPI uses even PNs on the AP side, odd on the STA side */
		if (vdev->opmode == wlan_op_mode_ap) {
			params.u.upd_queue_params.pn_even = 1;
			params.u.upd_queue_params.update_pn_even = 1;
		} else {
			params.u.upd_queue_params.pn_uneven = 1;
			params.u.upd_queue_params.update_pn_uneven = 1;
		}
		break;
	default:
		/* Open / unknown security: disable PN checking */
		params.u.upd_queue_params.pn_check_needed = 0;
		pn_size = 0;
		break;
	}


	/* Push the update to every TID that has a HW reorder queue */
	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (pn_size) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO_HIGH,
					  "%s PN set for TID:%d pn:%x:%x:%x:%x",
					  __func__, i, rx_pn[3], rx_pn[2],
					  rx_pn[1], rx_pn[0]);
				params.u.upd_queue_params.update_pn_valid = 1;
				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
			}
			rx_tid->pn_size = pn_size;
			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
					dp_rx_tid_update_cb, rx_tid);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
				  "PN Check not setup for TID :%d ", i);
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
}
2668
2669
/**
 * dp_rx_sec_ind_handler() - Record the security type indicated by target
 * @soc_handle: Datapath soc handle
 * @peer_id: target peer id the indication is for
 * @sec_type: cipher type to record
 * @is_unicast: non-zero selects the unicast security slot, else multicast
 * @michael_key: TKIP michael key (unused; copy is under #ifdef notyet)
 * @rx_pn: starting PN (unused; WAPI seeding is under #ifdef notyet)
 *
 * Looks up the peer by id, stores sec_type in the matching security
 * slot, and releases the lookup reference.
 */
void
dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
	enum cdp_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
	u_int32_t *rx_pn)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer;
	int sec_index;

	/* dp_peer_find_by_id() takes a reference; released at the end */
	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Couldn't find peer from ID %d - skipping security inits",
			  peer_id);
		return;
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): %s key of type %d",
		  peer,
		  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
		  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
		  peer->mac_addr.raw[4], peer->mac_addr.raw[5],
		  is_unicast ? "ucast" : "mcast",
		  sec_type);
	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;
#ifdef notyet /* TODO: See if this is required for defrag support */
	/* michael key only valid for TKIP, but for simplicity,
	 * copy it anyway
	 */
	qdf_mem_copy(
		&peer->security[sec_index].michael_key[0],
		michael_key,
		sizeof(peer->security[sec_index].michael_key));
#ifdef BIG_ENDIAN_HOST
	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
				 sizeof(peer->security[sec_index].michael_key));
#endif /* BIG_ENDIAN_HOST */
#endif

#ifdef notyet /* TODO: Check if this is required for wifi3.0 */
	/* NOTE(review): "_EXT_TIDS" below looks like a truncated macro
	 * name — confirm against the original before enabling this block.
	 */
	if (sec_type != cdp_sec_type_wapi) {
		qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
	} else {
		for (i = 0; i < DP_MAX_TIDS; i++) {
			/*
			 * Setting PN valid bit for WAPI sec_type,
			 * since WAPI PN has to be started with predefined value
			 */
			peer->tids_last_pn_valid[i] = 1;
			qdf_mem_copy(
				(u_int8_t *) &peer->tids_last_pn[i],
				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
			peer->tids_last_pn[i].pn128[1] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
			peer->tids_last_pn[i].pn128[0] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
		}
	}
#endif
	/* TODO: Update HW TID queue with PN check parameters (pn type for
	 * all security types and last pn for WAPI) once REO command API
	 * is available
	 */

	dp_peer_unref_del_find_by_id(peer);
}
2737
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05302738#ifndef CONFIG_WIN
Leo Chang5ea93a42016-11-03 12:39:49 -07002739/**
2740 * dp_register_peer() - Register peer into physical device
2741 * @pdev - data path device instance
2742 * @sta_desc - peer description
2743 *
2744 * Register peer into physical device
2745 *
2746 * Return: QDF_STATUS_SUCCESS registration success
2747 * QDF_STATUS_E_FAULT peer not found
2748 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002749QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
Leo Chang5ea93a42016-11-03 12:39:49 -07002750 struct ol_txrx_desc_type *sta_desc)
2751{
2752 struct dp_peer *peer;
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002753 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07002754
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002755 peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
2756 sta_desc->sta_id);
Leo Chang5ea93a42016-11-03 12:39:49 -07002757 if (!peer)
2758 return QDF_STATUS_E_FAULT;
2759
2760 qdf_spin_lock_bh(&peer->peer_info_lock);
2761 peer->state = OL_TXRX_PEER_STATE_CONN;
2762 qdf_spin_unlock_bh(&peer->peer_info_lock);
2763
2764 return QDF_STATUS_SUCCESS;
2765}
2766
2767/**
2768 * dp_clear_peer() - remove peer from physical device
2769 * @pdev - data path device instance
2770 * @sta_id - local peer id
2771 *
2772 * remove peer from physical device
2773 *
2774 * Return: QDF_STATUS_SUCCESS registration success
2775 * QDF_STATUS_E_FAULT peer not found
2776 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002777QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
Leo Chang5ea93a42016-11-03 12:39:49 -07002778{
2779 struct dp_peer *peer;
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002780 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07002781
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002782 peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
Leo Chang5ea93a42016-11-03 12:39:49 -07002783 if (!peer)
2784 return QDF_STATUS_E_FAULT;
2785
2786 qdf_spin_lock_bh(&peer->peer_info_lock);
2787 peer->state = OL_TXRX_PEER_STATE_DISC;
2788 qdf_spin_unlock_bh(&peer->peer_info_lock);
2789
2790 return QDF_STATUS_SUCCESS;
2791}
2792
/**
 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
 * @pdev_handle: data path device instance
 * @vdev_handle: virtual interface instance
 * @peer_addr: peer mac address
 * @local_id: output — local peer id of the matched peer
 *
 * Find peer by peer mac address within vdev
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
		struct cdp_vdev *vdev_handle,
		uint8_t *peer_addr, uint8_t *local_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_peer *peer;

	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);

	if (!peer)
		return NULL;

	/* Peer exists but belongs to a different vdev: not a match */
	if (peer->vdev != vdev) {
		dp_peer_unref_delete(peer);
		return NULL;
	}

	*local_id = peer->local_id;
	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	/* NOTE(review): the peer pointer is returned AFTER its reference is
	 * dropped, so the caller holds no reference and the peer could be
	 * freed concurrently — this appears to be a legacy contract the
	 * callers rely on; confirm before changing.
	 */
	dp_peer_unref_delete(peer);

	return peer;
}
2835
2836/**
2837 * dp_local_peer_id() - Find local peer id within peer instance
2838 * @peer - peer instance
2839 *
2840 * Find local peer id within peer instance
2841 *
2842 * Return: local peer id
2843 */
2844uint16_t dp_local_peer_id(void *peer)
2845{
2846 return ((struct dp_peer *)peer)->local_id;
2847}
2848
2849/**
2850 * dp_peer_find_by_local_id() - Find peer by local peer id
2851 * @pdev - data path device instance
2852 * @local_peer_id - local peer id want to find
2853 *
2854 * Find peer by local peer id within physical device
2855 *
2856 * Return: peer instance void pointer
2857 * NULL cannot find target peer
2858 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002859void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
Leo Chang5ea93a42016-11-03 12:39:49 -07002860{
2861 struct dp_peer *peer;
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002862 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07002863
Ryan Hsu9d56e3a2018-06-06 16:20:05 -07002864 if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
Mohit Khanna890818b2018-07-23 11:41:08 -07002865 QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
2866 "Incorrect local id %u", local_id);
Ryan Hsu9d56e3a2018-06-06 16:20:05 -07002867 return NULL;
2868 }
Leo Chang5ea93a42016-11-03 12:39:49 -07002869 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2870 peer = pdev->local_peer_ids.map[local_id];
2871 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Yun Park11d46e02017-11-27 10:51:53 -08002872 DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
Leo Chang5ea93a42016-11-03 12:39:49 -07002873 return peer;
2874}
2875
2876/**
2877 * dp_peer_state_update() - update peer local state
2878 * @pdev - data path device instance
2879 * @peer_addr - peer mac address
2880 * @state - new peer local state
2881 *
2882 * update peer local state
2883 *
2884 * Return: QDF_STATUS_SUCCESS registration success
2885 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002886QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
Leo Chang5ea93a42016-11-03 12:39:49 -07002887 enum ol_txrx_peer_state state)
2888{
2889 struct dp_peer *peer;
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002890 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07002891
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05302892 peer = dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
Ankit Gupta6fb389b2017-01-03 12:23:45 -08002893 if (NULL == peer) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05302894 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2895 "Failed to find peer for: [%pM]", peer_mac);
Ankit Gupta6fb389b2017-01-03 12:23:45 -08002896 return QDF_STATUS_E_FAILURE;
2897 }
Leo Chang5ea93a42016-11-03 12:39:49 -07002898 peer->state = state;
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08002899
Jeff Johnson3f217e22017-09-18 10:13:35 -07002900 DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08002901 /* ref_cnt is incremented inside dp_peer_find_hash_find().
2902 * Decrement it here.
2903 */
Chaithanya Garrepalli7c8cf122018-09-07 19:23:52 +05302904 dp_peer_unref_delete(peer);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08002905
Leo Chang5ea93a42016-11-03 12:39:49 -07002906 return QDF_STATUS_SUCCESS;
2907}
2908
2909/**
Jeff Johnsonff2dfb22018-05-12 10:27:57 -07002910 * dp_get_vdevid() - Get virtual interface id which peer registered
Leo Chang5ea93a42016-11-03 12:39:49 -07002911 * @peer - peer instance
Jeff Johnsonff2dfb22018-05-12 10:27:57 -07002912 * @vdev_id - virtual interface id which peer registered
Leo Chang5ea93a42016-11-03 12:39:49 -07002913 *
Jeff Johnsonff2dfb22018-05-12 10:27:57 -07002914 * Get virtual interface id which peer registered
Leo Chang5ea93a42016-11-03 12:39:49 -07002915 *
2916 * Return: QDF_STATUS_SUCCESS registration success
2917 */
2918QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2919{
2920 struct dp_peer *peer = peer_handle;
2921
Jeff Johnson3f217e22017-09-18 10:13:35 -07002922 DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
Leo Chang5ea93a42016-11-03 12:39:49 -07002923 peer, peer->vdev, peer->vdev->vdev_id);
2924 *vdev_id = peer->vdev->vdev_id;
2925 return QDF_STATUS_SUCCESS;
2926}
2927
Yun Park601d0d82017-08-28 21:49:31 -07002928struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
2929 uint8_t sta_id)
Yun Parkfde6b9e2017-06-26 17:13:11 -07002930{
Yun Park601d0d82017-08-28 21:49:31 -07002931 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Yun Parkfde6b9e2017-06-26 17:13:11 -07002932 struct dp_peer *peer = NULL;
Yun Parkfde6b9e2017-06-26 17:13:11 -07002933
2934 if (sta_id >= WLAN_MAX_STA_COUNT) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05302935 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Yun Parkfde6b9e2017-06-26 17:13:11 -07002936 "Invalid sta id passed");
2937 return NULL;
2938 }
2939
Yun Parkfde6b9e2017-06-26 17:13:11 -07002940 if (!pdev) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05302941 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Yun Parkfde6b9e2017-06-26 17:13:11 -07002942 "PDEV not found for sta_id [%d]", sta_id);
2943 return NULL;
2944 }
2945
2946 peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
2947 if (!peer) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05302948 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Yun Parkfde6b9e2017-06-26 17:13:11 -07002949 "PEER [%d] not found", sta_id);
2950 return NULL;
2951 }
2952
2953 return (struct cdp_vdev *)peer->vdev;
2954}
2955
Leo Chang5ea93a42016-11-03 12:39:49 -07002956/**
2957 * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
2958 * @peer - peer instance
2959 *
2960 * Get virtual interface instance which peer belongs
2961 *
2962 * Return: virtual interface instance pointer
2963 * NULL in case cannot find
2964 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002965struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
Leo Chang5ea93a42016-11-03 12:39:49 -07002966{
2967 struct dp_peer *peer = peer_handle;
2968
Mohit Khanna7ac554b2018-05-24 11:58:13 -07002969 DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002970 return (struct cdp_vdev *)peer->vdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07002971}
2972
2973/**
2974 * dp_peer_get_peer_mac_addr() - Get peer mac address
2975 * @peer - peer instance
2976 *
2977 * Get peer mac address
2978 *
2979 * Return: peer mac address pointer
2980 * NULL in case cannot find
2981 */
2982uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
2983{
2984 struct dp_peer *peer = peer_handle;
2985 uint8_t *mac;
2986
2987 mac = peer->mac_addr.raw;
Jeff Johnson3f217e22017-09-18 10:13:35 -07002988 DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
Leo Chang5ea93a42016-11-03 12:39:49 -07002989 peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2990 return peer->mac_addr.raw;
2991}
2992
2993/**
2994 * dp_get_peer_state() - Get local peer state
2995 * @peer - peer instance
2996 *
2997 * Get local peer state
2998 *
2999 * Return: peer status
3000 */
3001int dp_get_peer_state(void *peer_handle)
3002{
3003 struct dp_peer *peer = peer_handle;
3004
Yun Park11d46e02017-11-27 10:51:53 -08003005 DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state);
Leo Chang5ea93a42016-11-03 12:39:49 -07003006 return peer->state;
3007}
3008
3009/**
3010 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
3011 * @pdev - data path device instance
3012 *
3013 * local peer id pool alloc for physical device
3014 *
3015 * Return: none
3016 */
3017void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
3018{
3019 int i;
3020
3021 /* point the freelist to the first ID */
3022 pdev->local_peer_ids.freelist = 0;
3023
3024 /* link each ID to the next one */
3025 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
3026 pdev->local_peer_ids.pool[i] = i + 1;
3027 pdev->local_peer_ids.map[i] = NULL;
3028 }
3029
3030 /* link the last ID to itself, to mark the end of the list */
3031 i = OL_TXRX_NUM_LOCAL_PEER_IDS;
3032 pdev->local_peer_ids.pool[i] = i;
3033
3034 qdf_spinlock_create(&pdev->local_peer_ids.lock);
3035 DP_TRACE(INFO, "Peer pool init");
3036}
3037
3038/**
3039 * dp_local_peer_id_alloc() - allocate local peer id
3040 * @pdev - data path device instance
3041 * @peer - new peer instance
3042 *
3043 * allocate local peer id
3044 *
3045 * Return: none
3046 */
3047void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
3048{
3049 int i;
3050
3051 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3052 i = pdev->local_peer_ids.freelist;
3053 if (pdev->local_peer_ids.pool[i] == i) {
3054 /* the list is empty, except for the list-end marker */
3055 peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3056 } else {
3057 /* take the head ID and advance the freelist */
3058 peer->local_id = i;
3059 pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
3060 pdev->local_peer_ids.map[i] = peer;
3061 }
3062 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Jeff Johnson3f217e22017-09-18 10:13:35 -07003063 DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
Leo Chang5ea93a42016-11-03 12:39:49 -07003064}
3065
3066/**
3067 * dp_local_peer_id_free() - remove local peer id
3068 * @pdev - data path device instance
3069 * @peer - peer instance should be removed
3070 *
3071 * remove local peer id
3072 *
3073 * Return: none
3074 */
3075void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
3076{
3077 int i = peer->local_id;
3078 if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
3079 (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
3080 return;
3081 }
3082
3083 /* put this ID on the head of the freelist */
3084 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
3085 pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
3086 pdev->local_peer_ids.freelist = i;
3087 pdev->local_peer_ids.map[i] = NULL;
3088 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
3089}
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05303090#endif
Ishank Jain1e7401c2017-02-17 15:38:39 +05303091
3092/**
3093 * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
3094 * @soc_handle: DP SOC handle
3095 * @peer_id:peer_id of the peer
3096 *
3097 * return: vdev_id of the vap
3098 */
3099uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
3100 uint16_t peer_id, uint8_t *peer_mac)
3101{
3102 struct dp_soc *soc = (struct dp_soc *)soc_handle;
3103 struct dp_peer *peer;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05303104 uint8_t vdev_id;
Ishank Jain1e7401c2017-02-17 15:38:39 +05303105
3106 peer = dp_peer_find_by_id(soc, peer_id);
3107
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05303108 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3109 "soc %pK peer_id %d", soc, peer_id);
Ishank Jain1e7401c2017-02-17 15:38:39 +05303110
3111 if (!peer) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05303112 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3113 "peer not found ");
Ishank Jain1e7401c2017-02-17 15:38:39 +05303114 return CDP_INVALID_VDEV_ID;
3115 }
3116
3117 qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05303118 vdev_id = peer->vdev->vdev_id;
3119
3120 dp_peer_unref_del_find_by_id(peer);
3121
3122 return vdev_id;
Ishank Jain1e7401c2017-02-17 15:38:39 +05303123}
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07003124
3125/**
3126 * dp_peer_rxtid_stats: Retried Rx TID (REO queue) stats from HW
3127 * @peer: DP peer handle
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05303128 * @dp_stats_cmd_cb: REO command callback function
3129 * @cb_ctxt: Callback context
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07003130 *
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05303131 * Return: none
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07003132 */
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05303133void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
3134 void *cb_ctxt)
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07003135{
3136 struct dp_soc *soc = peer->vdev->pdev->soc;
3137 struct hal_reo_cmd_params params;
3138 int i;
3139
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05303140 if (!dp_stats_cmd_cb)
3141 return;
3142
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07003143 qdf_mem_zero(&params, sizeof(params));
3144 for (i = 0; i < DP_MAX_TIDS; i++) {
3145 struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
3146 if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
3147 params.std.need_status = 1;
3148 params.std.addr_lo =
3149 rx_tid->hw_qdesc_paddr & 0xffffffff;
3150 params.std.addr_hi =
3151 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05303152
3153 if (cb_ctxt) {
3154 dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3155 &params, dp_stats_cmd_cb, cb_ctxt);
3156 } else {
3157 dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
3158 &params, dp_stats_cmd_cb, rx_tid);
3159 }
Karunakar Dasineni3da08112017-06-15 14:42:39 -07003160
3161 /* Flush REO descriptor from HW cache to update stats
3162 * in descriptor memory. This is to help debugging */
3163 qdf_mem_zero(&params, sizeof(params));
3164 params.std.need_status = 0;
3165 params.std.addr_lo =
3166 rx_tid->hw_qdesc_paddr & 0xffffffff;
3167 params.std.addr_hi =
3168 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08003169 params.u.fl_cache_params.flush_no_inval = 1;
Karunakar Dasineni3da08112017-06-15 14:42:39 -07003170 dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
3171 NULL);
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07003172 }
3173 }
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07003174}
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05303175
Pramod Simha6e10cb22018-06-20 12:05:44 -07003176void dp_set_michael_key(struct cdp_peer *peer_handle,
3177 bool is_unicast, uint32_t *key)
3178{
3179 struct dp_peer *peer = (struct dp_peer *)peer_handle;
3180 uint8_t sec_index = is_unicast ? 1 : 0;
3181
3182 if (!peer) {
Chaitanya Kiran Godavarthie0b34142019-01-16 17:05:15 +05303183 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Pramod Simha6e10cb22018-06-20 12:05:44 -07003184 "peer not found ");
3185 return;
3186 }
3187
3188 qdf_mem_copy(&peer->security[sec_index].michael_key[0],
3189 key, IEEE80211_WEP_MICLEN);
3190}
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05303191
3192bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
3193{
3194 struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
3195
3196 if (peer) {
3197 /*
3198 * Decrement the peer ref which is taken as part of
3199 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
3200 */
3201 dp_peer_unref_del_find_by_id(peer);
3202
3203 return true;
3204 }
3205
3206 return false;
3207}