/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include "dp_rx_defrag.h"
#include <hal_api.h>
#include <hal_reo.h>
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#include <cds_api.h>
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>

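/*
 * dp_set_ssn_valid_flag() - Set the SSN valid (svld) field in a REO
 *	update-queue command
 * @params: REO command parameters to update
 * @valid: value to program into the svld field
 *
 * Compiled in only when DP_LFR is enabled; otherwise a no-op stub is used.
 *
 * Return: None
 */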
#ifdef DP_LFR
static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid)
{
	params->u.upd_queue_params.update_svld = 1;
	params->u.upd_queue_params.svld = valid;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Setting SSN valid bit to %d",
		  __func__, valid);
}
#else
static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid) {}
#endif

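/*
 * dp_peer_find_mac_addr_cmp() - Compare two MAC addresses in their aligned
 *	representation
 * @mac_addr1: first MAC address
 * @mac_addr2: second MAC address
 *
 * Return: 0 if the addresses match, non-zero otherwise
 */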
static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		/*
		 * Intentionally use & rather than &&.
		 * Because the operands are binary rather than generic boolean,
		 * the functionality is equivalent.
		 * Using && has the advantage of short-circuited evaluation,
		 * but using & has the advantage of no conditional branching,
		 * which is a more significant benefit.
		 */
		&
		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}

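/*
 * dp_peer_find_map_attach() - Allocate the peer ID -> peer object map
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM if the map allocation fails
 */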
static int dp_peer_find_map_attach(struct dp_soc *soc)
{
	uint32_t max_peers, peer_map_size;

	max_peers = soc->max_peers;
	/* allocate the peer ID -> peer object map */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "\n<=== cfg max peer id %d ====>", max_peers);
	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!soc->peer_id_to_obj_map) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: peer map memory allocation failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * The peer_id_to_obj_map doesn't really need to be initialized,
	 * since elements are only used after they have been individually
	 * initialized.
	 * However, it is convenient for debugging to have all elements
	 * that are not in use set to 0.
	 */
	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
	return 0; /* success */
}

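/*
 * dp_log2_ceil() - Compute the ceiling of log2 of a value
 * @value: input value
 *
 * Return: the smallest integer n such that (1 << n) >= value
 */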
static int dp_log2_ceil(unsigned value)
{
	unsigned tmp = value;
	int log2 = -1;

	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	if (1 << log2 != value)
		log2++;
	return log2;
}

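/*
 * dp_peer_find_add_id_to_obj() - Record a peer ID in the peer object
 * @peer: peer object
 * @peer_id: ID to store in the first free slot of peer->peer_ids
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE if all ID slots are in use
 */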
static int dp_peer_find_add_id_to_obj(
	struct dp_peer *peer,
	uint16_t peer_id)
{
	int i;

	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
			peer->peer_ids[i] = peer_id;
			return 0; /* success */
		}
	}
	return QDF_STATUS_E_FAILURE; /* failure */
}

#define DP_PEER_HASH_LOAD_MULT 2
#define DP_PEER_HASH_LOAD_SHIFT 0

#define DP_AST_HASH_LOAD_MULT 2
#define DP_AST_HASH_LOAD_SHIFT 0

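/*
 * dp_peer_find_hash_attach() - Allocate the peer MAC address -> peer object
 *	hash table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM if the bin allocation fails
 */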
static int dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = soc->max_peers;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	return 0;
}

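/*
 * dp_peer_find_hash_detach() - Free the peer MAC address hash table bins
 * @soc: SoC handle
 *
 * Return: None
 */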
static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_hash.bins);
}

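/*
 * dp_peer_find_hash_index() - Compute the peer hash bin index from a MAC
 *	address
 * @soc: SoC handle
 * @mac_addr: MAC address, in the 2-byte aligned representation
 *
 * Return: hash bin index for this MAC address
 */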
static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	unsigned index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->peer_hash.idx_bits;
	index &= soc->peer_hash.mask;
	return index;
}

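/*
 * dp_peer_find_hash_add() - Add a peer object to the MAC address hash table
 * @soc: SoC handle
 * @peer: peer object to add
 *
 * Takes the peer_ref_mutex internally.
 *
 * Return: None
 */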
void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/*
	 * It is important to add the new peer at the tail of the peer list
	 * with the bin index. Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}

#ifdef FEATURE_AST
/*
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM if the bin allocation fails
 */
static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
		DP_AST_HASH_LOAD_SHIFT);

	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->ast_hash.mask = hash_elems - 1;
	soc->ast_hash.idx_bits = log2;

	/* allocate an array of TAILQ peer object lists */
	soc->ast_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
				dp_ast_entry)));

	if (!soc->ast_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->ast_hash.bins[i]);

	return 0;
}

/*
 * dp_peer_ast_hash_detach() - Free AST Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->ast_hash.bins);
}

/*
 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address, in the 2-byte aligned representation
 *
 * Return: AST hash
 */
static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->ast_hash.idx_bits;
	index &= soc->ast_hash.mask;
	return index;
}

/*
 * dp_peer_ast_hash_add() - Add AST entry into hash table
 * @soc: SoC handle
 * @ase: AST entry to add
 *
 * This function adds the AST entry into the SoC AST hash table.
 * It assumes the caller has taken the ast lock to protect access to this
 * table.
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
		struct dp_ast_entry *ase)
{
	uint32_t index;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
 * @soc: SoC handle
 * @ase: AST entry to remove
 *
 * This function removes the AST entry from the SoC AST hash table.
 * It assumes the caller has taken the ast lock to protect access to this
 * table.
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
		struct dp_ast_entry *ase)
{
	unsigned index;
	struct dp_ast_entry *tmpase;
	int found = 0;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	/* Check if tail is not empty before delete */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));

	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (tmpase == ase) {
			found = 1;
			break;
		}
	}

	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_find() - Find AST entry by MAC address
 * @soc: SoC handle
 * @ast_mac_addr: MAC address to look up
 *
 * It assumes the caller has taken the ast lock to protect access to
 * the AST hash table.
 *
 * Return: AST entry, or NULL if no entry matches
 */
struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
						uint8_t *ast_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
			ast_mac_addr, DP_MAC_ADDR_LEN);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 *
 * Return: None
 */
static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	struct dp_ast_entry *ast_entry;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
	bool ast_entry_found = FALSE;

	if (!peer) {
		return;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
		__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
		mac_addr[1], mac_addr[2], mac_addr[3],
		mac_addr[4], mac_addr[5]);

	qdf_spin_lock_bh(&soc->ast_lock);
	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
		if (!(qdf_mem_cmp(mac_addr, ast_entry->mac_addr.raw,
				DP_MAC_ADDR_LEN))) {
			ast_entry->ast_idx = hw_peer_id;
			soc->ast_table[hw_peer_id] = ast_entry;
			ast_entry->is_active = TRUE;
			peer_type = ast_entry->type;
			ast_entry_found = TRUE;
		}
	}

	if (ast_entry_found || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
				soc->ctrl_psoc, peer->peer_ids[0],
				hw_peer_id, vdev_id,
				mac_addr, peer_type);
		}
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"AST entry not found");
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return;
}

/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: AST entry type
 * @flags: AST configuration flags (wds or hmwds)
 *
 * This API is used by the WDS source port learning function to
 * add a new AST entry into the peer AST list.
 *
 * Return: 0 if new entry is allocated,
 *         -1 if entry add failed
 */
int dp_peer_add_ast(struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t *mac_addr,
			enum cdp_txrx_ast_entry_type type,
			uint32_t flags)
{
	struct dp_ast_entry *ast_entry;
	struct dp_vdev *vdev = peer->vdev;
	uint8_t next_node_mac[6];
	int ret = -1;

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("Peers vdev is NULL"));
		QDF_ASSERT(0);
		return ret;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x",
		__func__, peer, mac_addr[0], mac_addr[1], mac_addr[2],
		mac_addr[3], mac_addr[4], mac_addr[5]);

	qdf_spin_lock_bh(&soc->ast_lock);

	/* If AST entry already exists, just return from here */
	ast_entry = dp_peer_ast_hash_find(soc, mac_addr);

	if (ast_entry) {
		if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
			ast_entry->is_active = TRUE;

		qdf_spin_unlock_bh(&soc->ast_lock);
		return 0;
	}

	ast_entry = (struct dp_ast_entry *)
			qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("fail to allocate ast_entry"));
		QDF_ASSERT(0);
		return ret;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
	ast_entry->peer = peer;
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->vdev_id = vdev->vdev_id;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
		break;
	case CDP_TXRX_AST_TYPE_SELF:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
		break;
	case CDP_TXRX_AST_TYPE_MEC:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Incorrect AST entry type"));
	}

	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
	DP_STATS_INC(soc, ast.added, 1);
	dp_peer_ast_hash_add(soc, ast_entry);
	qdf_spin_unlock_bh(&soc->ast_lock);

	if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
	else
		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);

	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) {
		if (QDF_STATUS_SUCCESS ==
			soc->cdp_soc.ol_ops->peer_add_wds_entry(
					peer->vdev->osif_vdev,
					mac_addr,
					next_node_mac,
					flags))
			return 0;
	}

	return ret;
}

/*
 * dp_peer_del_ast() - Delete and free AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function removes the AST entry from peer and soc tables.
 * It assumes the caller has taken the ast lock to protect access to these
 * tables.
 *
 * Return: None
 */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = ast_entry->peer;

	if (ast_entry->next_hop)
		soc->cdp_soc.ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
						ast_entry->mac_addr.raw);

	soc->ast_table[ast_entry->ast_idx] = NULL;
	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);
	qdf_mem_free(ast_entry);
}

/*
 * dp_peer_update_ast() - Update the AST entry to point to a new peer
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @ast_entry: AST entry of the node
 * @flags: wds or hmwds
 *
 * This function updates the AST entry to the roamed peer and soc tables.
 * It assumes the caller has taken the ast lock to protect access to these
 * tables.
 *
 * Return: 0 if ast entry is updated successfully
 *         -1 failure
 */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	int ret = -1;
	struct dp_peer *old_peer;

	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF))
		return 0;

	old_peer = ast_entry->peer;
	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);

	ast_entry->peer = peer;
	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
	ast_entry->vdev_id = peer->vdev->vdev_id;
	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
				peer->vdev->osif_vdev,
				ast_entry->mac_addr.raw,
				peer->mac_addr.raw,
				flags);

	return ret;
}

/*
 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the pdev_id from the ast entry.
 *
 * Return: (uint8_t) pdev_id
 */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->pdev_id;
}

/*
 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the next hop from the ast entry.
 *
 * Return: (uint8_t) next_hop
 */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->next_hop;
}

/*
 * dp_peer_ast_set_type() - set type in the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 * @type: AST entry type to set
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
 */
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
	ast_entry->type = type;
}

#else
int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		uint32_t flags)
{
	return 1;
}

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
}

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
			struct dp_ast_entry *ast_entry, uint32_t flags)
{
	return 1;
}

struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
						uint8_t *ast_mac_addr)
{
	return NULL;
}

static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	return 0;
}

static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	return;
}

static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
}

void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
}

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
#endif

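/*
 * dp_peer_find_hash_find() - Look up a peer object by MAC address
 * @soc: SoC handle
 * @peer_mac_addr: peer MAC address
 * @mac_addr_is_aligned: set if peer_mac_addr is already in the aligned
 *	dp_align_mac_addr representation
 * @vdev_id: vdev to match, or DP_VDEV_ALL to match any vdev
 *
 * On success, the peer's reference count is incremented before the hash
 * lock is released; the caller is responsible for releasing that reference.
 *
 * Return: peer object, or NULL if no matching peer is found
 */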
struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_peer *peer;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, DP_MAC_ADDR_LEN);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peers with the same MAC
		 * address; the modified find takes care of finding the
		 * correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
			((peer->vdev->vdev_id == vdev_id) ||
			 (vdev_id == DP_VDEV_ALL))) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it - increment the ref count before releasing
			 * the lock
			 */
			qdf_atomic_inc(&peer->ref_cnt);
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return NULL; /* failure */
}

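/*
 * dp_peer_find_hash_remove() - Remove a peer object from the hash table
 * @soc: SoC handle
 * @peer: peer object to remove
 *
 * The caller must hold peer_ref_mutex; see the locking comment in the
 * function body.
 *
 * Return: None
 */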
void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	/* Check if tail is not empty before delete */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
	/*
	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
	 * by the caller.
	 * The caller needs to hold the lock from the time the peer object's
	 * reference count is decremented and tested up through the time the
	 * reference to the peer object is removed from the hash table, by
	 * this function.
	 * Holding the lock only while removing the peer object reference
	 * from the hash table keeps the hash table consistent, but does not
	 * protect against a new HL tx context starting to use the peer object
	 * if it looks up the peer object from its MAC address just after the
	 * peer ref count is decremented to zero, but just before the peer
	 * object reference is removed from the hash table.
	 */
	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}
	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
}

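/*
 * dp_peer_find_hash_erase() - Delete all peers remaining in the hash table
 * @soc: SoC handle
 *
 * Used at detach time, when the soc is known to no longer be in use.
 *
 * Return: None
 */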
void dp_peer_find_hash_erase(struct dp_soc *soc)
{
	int i;

	/*
	 * Not really necessary to take peer_ref_mutex lock - by this point,
	 * it's known that the soc is no longer in use.
	 */
	for (i = 0; i <= soc->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
			struct dp_peer *peer, *peer_next;

			/*
			 * TAILQ_FOREACH_SAFE must be used here to avoid any
			 * memory access violation after peer is freed
			 */
			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
				hash_list_elem, peer_next) {
				/*
				 * Don't remove the peer from the hash table -
				 * that would modify the list we are currently
				 * traversing, and it's not necessary anyway.
				 */
				/*
				 * Artificially adjust the peer's ref count to
				 * 1, so it will get deleted by
				 * dp_peer_unref_delete.
				 */
				/* set to zero */
				qdf_atomic_init(&peer->ref_cnt);
				/* incr to one */
				qdf_atomic_inc(&peer->ref_cnt);
				dp_peer_unref_delete(peer);
			}
		}
	}
}

static void dp_peer_find_map_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_id_to_obj_map);
}

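/*
 * dp_peer_find_attach() - Allocate the peer ID map and the peer and AST
 *	hash tables
 * @soc: SoC handle
 *
 * On failure, anything already allocated is freed before returning.
 *
 * Return: 0 on success, 1 if any allocation fails
 */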
int dp_peer_find_attach(struct dp_soc *soc)
{
	if (dp_peer_find_map_attach(soc))
		return 1;

	if (dp_peer_find_hash_attach(soc)) {
		dp_peer_find_map_detach(soc);
		return 1;
	}

	if (dp_peer_ast_hash_attach(soc)) {
		dp_peer_find_hash_detach(soc);
		dp_peer_find_map_detach(soc);
		return 1;
	}
	return 0; /* success */
}

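/*
 * dp_rx_tid_stats_cb() - REO command callback that prints Rx TID queue
 *	statistics
 * @soc: SoC handle
 * @cb_ctxt: callback context (struct dp_rx_tid *)
 * @reo_status: REO queue status returned by HW
 *
 * Return: None
 */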
void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
			queue_status->header.status, rx_tid->tid);
		return;
	}

	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d):\n"
		"ssn: %d\n"
		"curr_idx : %d\n"
		"pn_31_0 : %08x\n"
		"pn_63_32 : %08x\n"
		"pn_95_64 : %08x\n"
		"pn_127_96 : %08x\n"
		"last_rx_enq_tstamp : %08x\n"
		"last_rx_deq_tstamp : %08x\n"
		"rx_bitmap_31_0 : %08x\n"
		"rx_bitmap_63_32 : %08x\n"
		"rx_bitmap_95_64 : %08x\n"
		"rx_bitmap_127_96 : %08x\n"
		"rx_bitmap_159_128 : %08x\n"
		"rx_bitmap_191_160 : %08x\n"
		"rx_bitmap_223_192 : %08x\n"
		"rx_bitmap_255_224 : %08x\n",
		rx_tid->tid,
		queue_status->ssn, queue_status->curr_idx,
		queue_status->pn_31_0, queue_status->pn_63_32,
		queue_status->pn_95_64, queue_status->pn_127_96,
		queue_status->last_rx_enq_tstamp,
		queue_status->last_rx_deq_tstamp,
		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
		queue_status->rx_bitmap_159_128,
		queue_status->rx_bitmap_191_160,
		queue_status->rx_bitmap_223_192,
		queue_status->rx_bitmap_255_224);

	DP_TRACE_STATS(FATAL,
		"curr_mpdu_cnt : %d\n"
		"curr_msdu_cnt : %d\n"
		"fwd_timeout_cnt : %d\n"
		"fwd_bar_cnt : %d\n"
		"dup_cnt : %d\n"
		"frms_in_order_cnt : %d\n"
		"bar_rcvd_cnt : %d\n"
		"mpdu_frms_cnt : %d\n"
		"msdu_frms_cnt : %d\n"
		"total_byte_cnt : %d\n"
		"late_recv_mpdu_cnt : %d\n"
		"win_jump_2k : %d\n"
		"hole_cnt : %d\n",
		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
		queue_status->msdu_frms_cnt, queue_status->total_cnt,
		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
		queue_status->hole_cnt);

	DP_PRINT_STATS("Num of Addba Req = %d\n", rx_tid->num_of_addba_req);
	DP_PRINT_STATS("Num of Addba Resp = %d\n", rx_tid->num_of_addba_resp);
	DP_PRINT_STATS("Num of Addba Resp successful = %d\n",
			rx_tid->num_addba_rsp_success);
	DP_PRINT_STATS("Num of Addba Resp failed = %d\n",
			rx_tid->num_addba_rsp_failed);
	DP_PRINT_STATS("Num of Delba Req = %d\n", rx_tid->num_of_delba_req);
	DP_PRINT_STATS("BA window size = %d\n", rx_tid->ba_win_size);
	DP_PRINT_STATS("Pn size = %d\n", rx_tid->pn_size);
}

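/*
 * dp_peer_find_add_id() - Map a firmware-assigned peer ID to a peer object
 * @soc: SoC handle
 * @peer_mac_addr: peer MAC address
 * @peer_id: peer ID assigned by firmware
 * @hw_peer_id: HW AST index for the peer
 * @vdev_id: vdev ID
 *
 * Return: peer object with its reference count incremented, or NULL if no
 *	peer with this MAC address exists yet
 */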
static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	struct dp_peer *peer;

	QDF_ASSERT(peer_id <= soc->max_peers);
	/* check if there's already a peer object with this MAC address */
	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
		0 /* is aligned */, vdev_id);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
		peer_mac_addr[4], peer_mac_addr[5]);

	if (peer) {
		/* peer's ref count was already incremented by
		 * peer_find_hash_find
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: ref_cnt: %d", __func__,
			qdf_atomic_read(&peer->ref_cnt));
		soc->peer_id_to_obj_map[peer_id] = peer;

		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
			/* TBDXXX: assert for now */
			QDF_ASSERT(0);
		}

		return peer;
	}

	return NULL;
}

/**
 * dp_rx_peer_map_handler() - handle peer map event from firmware
 * @soc_handle - generic soc handle
 * @peer_id - peer_id from firmware
 * @hw_peer_id - ast index for this peer
 * @vdev_id - vdev ID
 * @peer_mac_addr - MAC address of the peer
 *
 * associate the peer_id that firmware provided with peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: none
 */

void
dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id, uint8_t *peer_mac_addr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer = NULL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "
		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d", soc, peer_id,
		hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
		peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
		peer_mac_addr[5], vdev_id);

	peer = soc->peer_id_to_obj_map[peer_id];

	if ((hw_peer_id < 0) || (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"invalid hw_peer_id: %d", hw_peer_id);
		qdf_assert_always(0);
	}

	/*
	 * check if peer already exists for this peer_id, if so
	 * this peer map event is in response for a wds peer add
	 * wmi command sent during wds source port learning.
	 * in this case just add the ast entry to the existing
	 * peer ast_list.
	 */
	if (!peer)
		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
					hw_peer_id, vdev_id);

	if (peer) {
		qdf_assert_always(peer->vdev);
		/*
		 * For every peer map message, search and set if bss_peer
		 */
		if (!(qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
				DP_MAC_ADDR_LEN))) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
				"vdev bss_peer!!!!");
			peer->bss_peer = 1;
			peer->vdev->vap_bss_peer = peer;
		}
	}

	dp_peer_map_ast(soc, peer, peer_mac_addr,
			hw_peer_id, vdev_id);
}

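/**
 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
 * @soc_handle - generic soc handle
 * @peer_id - peer_id from firmware
 *
 * Remove the peer_id-to-object mapping, notify the control path, and drop
 * a reference to the peer, deleting the peer object if it was the last one.
 *
 * Return: none
 */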
void
dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
{
	struct dp_peer *peer;
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint8_t i;

	peer = __dp_peer_find_by_id(soc, peer_id);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"peer_unmap_event (soc:%pK) peer_id %d peer %pK",
		soc, peer_id, peer);

	/*
	 * Currently peer IDs are assigned for vdevs as well as peers.
	 * If the peer ID is for a vdev, then the peer pointer stored
	 * in peer_id_to_obj_map will be NULL.
	 */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Received unmap event for invalid peer_id"
			" %u", __func__, peer_id);
		return;
	}

	soc->peer_id_to_obj_map[peer_id] = NULL;
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == peer_id) {
			peer->peer_ids[i] = HTT_INVALID_PEER;
			break;
		}
	}

	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
				peer_id);
	}

	/*
	 * Remove a reference to the peer.
	 * If there are no more references, delete the peer object.
	 */
	dp_peer_unref_delete(peer);
}

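/*
 * dp_peer_find_detach() - Free the peer ID map and the peer and AST
 *	hash tables
 * @soc: SoC handle
 *
 * Return: None
 */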
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
	dp_peer_ast_hash_detach(soc);
}

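/*
 * dp_rx_tid_update_cb() - REO command status callback for Rx TID queue
 *	update commands
 * @soc: SoC handle
 * @cb_ctxt: callback context (struct dp_rx_tid *)
 * @reo_status: REO command status returned by HW
 *
 * Logs an error if the HW descriptor update did not succeed.
 *
 * Return: None
 */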
static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;

	if ((reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_SUCCESS) &&
	    (reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_DRAIN)) {
		/* Should not happen normally. Just print error for now */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc update failed(%d): tid %d",
			__func__,
			reo_status->rx_queue_status.header.status,
			rx_tid->tid);
	}
}

/*
 * dp_find_peer_by_addr - find peer instance by mac address
 * @dev: physical device instance
 * @peer_mac_addr: peer mac address
 * @local_id: local id for the peer
 *
 * Return: peer instance pointer
 */
void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
		uint8_t *local_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);

	if (!peer)
		return NULL;

	/* Multiple peer ids? How can know peer id? */
	*local_id = peer->local_id;
	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return peer;
}

/*
 * dp_rx_tid_update_wifi3() - Update receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
	ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 1;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_ba_window_size = 1;
	params.u.upd_queue_params.ba_window_size = ba_window_size;

	if (start_seq < IEEE80211_SEQ_MAX) {
		params.u.upd_queue_params.update_ssn = 1;
		params.u.upd_queue_params.ssn = start_seq;
	}

	dp_set_ssn_valid_flag(&params, 0);

	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
		dp_rx_tid_update_cb, rx_tid);

	rx_tid->ba_win_size = ba_window_size;
	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
			peer->vdev->pdev->ctrl_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
	}
	return 0;
}

/*
 * dp_reo_desc_free() - Callback to free REO descriptor memory after
 * HW cache flush
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;

	if ((reo_status->fl_cache_status.header.status !=
		HAL_REO_CMD_SUCCESS) &&
	    (reo_status->fl_cache_status.header.status !=
		HAL_REO_CMD_DRAIN)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc flush failed(%d): tid %d",
			__func__,
			reo_status->rx_queue_status.header.status,
			freedesc->rx_tid.tid);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
		(void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
	qdf_mem_unmap_nbytes_single(soc->osdev,
		rx_tid->hw_qdesc_paddr,
		QDF_DMA_BIDIRECTIONAL,
		rx_tid->hw_qdesc_alloc_size);
	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
	qdf_mem_free(freedesc);
}

#if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
/* Hawkeye emulation requires bus address to be >= 0x50000000 */
static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
{
	if (dma_addr < 0x50000000)
		return QDF_STATUS_E_FAILURE;
	else
		return QDF_STATUS_SUCCESS;
}
#else
static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * dp_rx_tid_setup_wifi3() - Setup receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: 0 on success, error code on failure
 */
int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
	uint32_t ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint32_t hw_qdesc_size;
	uint32_t hw_qdesc_align;
	int hal_pn_type;
	void *hw_qdesc_vaddr;
	uint32_t alloc_tries = 0;

	if (peer->delete_in_progress)
		return QDF_STATUS_E_FAILURE;

	rx_tid->ba_win_size = ba_window_size;
	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
			start_seq);
	rx_tid->num_of_addba_req = 0;
	rx_tid->num_of_delba_req = 0;
	rx_tid->num_of_addba_resp = 0;
	rx_tid->num_addba_rsp_failed = 0;
	rx_tid->num_addba_rsp_success = 0;
#ifdef notyet
	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
#else
	/* TODO: Allocating HW queue descriptors based on max BA window size
	 * for all QOS TIDs so that same descriptor can be used later when
	 * ADDBA request is received. This should be changed to allocate HW
	 * queue descriptors based on BA window size being negotiated (0 for
	 * non BA cases), and reallocate when BA window size changes and also
	 * send WMI message to FW to change the REO queue descriptor in Rx
	 * peer entry as part of dp_rx_tid_update.
	 */
	if (tid != DP_NON_QOS_TID)
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			HAL_RX_MAX_BA_WINDOW);
	else
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			ba_window_size);
#endif

	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
	/* To avoid unnecessary extra allocation for alignment, try allocating
	 * exact size and see if we already have aligned address.
	 */
	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;

try_desc_alloc:
	rx_tid->hw_qdesc_vaddr_unaligned =
		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);

	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc alloc failed: tid %d",
			__func__, tid);
		return QDF_STATUS_E_NOMEM;
	}

	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
		hw_qdesc_align) {
		/* Address allocated above is not aligned. Allocate extra
		 * memory for alignment
		 */
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned =
			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
					hw_qdesc_align - 1);

		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Rx tid HW desc alloc failed: tid %d",
				__func__, tid);
			return QDF_STATUS_E_NOMEM;
		}

		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
			rx_tid->hw_qdesc_vaddr_unaligned,
			hw_qdesc_align);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Total Size %d Aligned Addr %pK",
			__func__, rx_tid->hw_qdesc_alloc_size,
			hw_qdesc_vaddr);

	} else {
		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
	}

	/* TODO: Ensure that sec_type is set before ADDBA is received.
	 * Currently this is set based on htt indication
	 * HTT_T2H_MSG_TYPE_SEC_IND from target
	 */
	switch (peer->security[dp_sec_ucast].sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		hal_pn_type = HAL_PN_WPA;
		break;
	case cdp_sec_type_wapi:
		if (vdev->opmode == wlan_op_mode_ap)
			hal_pn_type = HAL_PN_WAPI_EVEN;
		else
			hal_pn_type = HAL_PN_WAPI_UNEVEN;
		break;
	default:
		hal_pn_type = HAL_PN_NONE;
		break;
	}

	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);

	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
		&(rx_tid->hw_qdesc_paddr));

	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
			QDF_STATUS_SUCCESS) {
		if (alloc_tries++ < 10)
			goto try_desc_alloc;
		else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Rx tid HW desc alloc failed (lowmem): tid %d",
				__func__, tid);
			return QDF_STATUS_E_NOMEM;
		}
	}

	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
			vdev->pdev->ctrl_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
	}
	return 0;
}

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	uint32_t list_size;
	struct reo_desc_list_node *desc;
	unsigned long curr_ts = qdf_get_system_timestamp();
	uint32_t desc_size, tot_desc_size;
	struct hal_reo_cmd_params params;

	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
		qdf_mem_zero(reo_status, sizeof(*reo_status));
		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
		return;
	} else if (reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_SUCCESS) {
		/* Should not happen normally. Just print error for now */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc deletion failed(%d): tid %d",
			__func__,
			reo_status->rx_queue_status.header.status,
			freedesc->rx_tid.tid);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		"%s: rx_tid: %d status: %d", __func__,
		freedesc->rx_tid.tid,
		reo_status->rx_queue_status.header.status);

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	freedesc->free_ts = curr_ts;
	qdf_list_insert_back_size(&soc->reo_desc_freelist,
		(qdf_list_node_t *)freedesc, &list_size);

	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
		((list_size >= REO_DESC_FREELIST_SIZE) ||
		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
		struct dp_rx_tid *rx_tid;

		qdf_list_remove_front(&soc->reo_desc_freelist,
			(qdf_list_node_t **)&desc);
		list_size--;
		rx_tid = &desc->rx_tid;

		/* Flush and invalidate REO descriptor from HW cache: Base
		 * and extension descriptors should be flushed separately
		 */
		tot_desc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			rx_tid->ba_win_size);
		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0);

		/* Flush reo extension descriptors */
		while ((tot_desc_size -= desc_size) > 0) {
			qdf_mem_zero(&params, sizeof(params));
			params.std.addr_lo =
				((uint64_t)(rx_tid->hw_qdesc_paddr) +
				tot_desc_size) & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
				CMD_FLUSH_CACHE,
				&params,
				NULL,
				NULL)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"%s: fail to send CMD_CACHE_FLUSH: "
					"tid %d desc %pK", __func__,
					rx_tid->tid,
					(void *)(rx_tid->hw_qdesc_paddr));
			}
		}

		/* Flush base descriptor */
		qdf_mem_zero(&params, sizeof(params));
		params.std.need_status = 1;
		params.std.addr_lo =
			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
			CMD_FLUSH_CACHE,
			&params,
			dp_reo_desc_free,
			(void *)desc)) {
			union hal_reo_status reo_status;
			/*
			 * If dp_reo_send_cmd returns failure, the related
			 * TID queue desc should be unmapped. The local
			 * reo_desc, together with the TID queue desc, also
			 * needs to be freed accordingly.
			 *
			 * Here invoke the desc_free function directly to do
			 * the clean up.
			 */
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: fail to send REO cmd to flush cache: tid %d",
				__func__, rx_tid->tid);
			qdf_mem_zero(&reo_status, sizeof(reo_status));
			reo_status.fl_cache_status.header.status = 0;
			dp_reo_desc_free(soc, (void *)desc, &reo_status);
		}
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
}
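
/*
 * Illustrative sketch (not part of the driver logic): the loop above
 * frees a parked queue descriptor only once the freelist is full or the
 * entry has aged out, which is why both REO_DESC_FREELIST_SIZE and
 * REO_DESC_FREE_DEFER_MS appear in the loop condition. A standalone
 * restatement of that predicate, assuming the same semantics:
 *
 *	static bool dp_reo_desc_can_free(uint32_t list_size,
 *			unsigned long curr_ts, unsigned long free_ts)
 *	{
 *		return (list_size >= REO_DESC_FREELIST_SIZE) ||
 *			((curr_ts - free_ts) > REO_DESC_FREE_DEFER_MS);
 *	}
 *
 * Deferring the free gives the REO hardware time to drain in-flight
 * references to the queue before its backing memory is flushed and
 * reused.
 */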

/*
 * dp_rx_tid_delete_wifi3() - Delete receive TID queue
 * @peer: Datapath peer handle
 * @tid: TID
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
{
	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	struct reo_desc_list_node *freedesc =
		qdf_mem_malloc(sizeof(*freedesc));

	if (!freedesc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: malloc failed for freedesc: tid %d",
			__func__, tid);
		return -ENOMEM;
	}

	freedesc->rx_tid = *rx_tid;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 0;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_vld = 1;
	params.u.upd_queue_params.vld = 0;

	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
		dp_rx_tid_delete_cb, (void *)freedesc);

	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
	rx_tid->hw_qdesc_alloc_size = 0;
	rx_tid->hw_qdesc_paddr = 0;

	return 0;
}
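
/*
 * For reference, the teardown implemented above is a two-step sequence
 * (step 2 runs asynchronously from the REO status ring):
 *
 *	1. CMD_UPDATE_RX_REO_QUEUE with update_vld = 1, vld = 0, so the
 *	   hardware stops using the queue descriptor;
 *	2. dp_rx_tid_delete_cb() parks the descriptor on
 *	   reo_desc_freelist and later issues CMD_FLUSH_CACHE for the
 *	   extension and base descriptors before dp_reo_desc_free()
 *	   releases the host memory.
 *
 * The rx_tid fields are cleared immediately after the command is
 * queued; the copy embedded in 'freedesc' keeps the descriptor address
 * and size alive for the callback.
 */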

#ifdef DP_LFR
static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
{
	int tid;

	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"Setting up TID %d for peer %pK peer->local_id %d",
			tid, peer, peer->local_id);
	}
}
#else
static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
#endif

/*
 * dp_peer_rx_init() - Initialize receive TID state
 * @pdev: Datapath pdev
 * @peer: Datapath peer
 *
 */
void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int tid;
	struct dp_rx_tid *rx_tid;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		rx_tid->array = &rx_tid->base;
		rx_tid->base.head = rx_tid->base.tail = NULL;
		rx_tid->tid = tid;
		rx_tid->defrag_timeout_ms = 0;
		rx_tid->ba_win_size = 0;
		rx_tid->ba_status = DP_RX_BA_INACTIVE;

		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;

#ifdef notyet /* TODO: See if this is required for exception handling */
		/* invalid sequence number */
		peer->tids_last_seq[tid] = 0xffff;
#endif
	}

	/* Setup default (non-qos) rx tid queue */
	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);

	/* Setup the rx tid queue for TID 0. Queues for the other TIDs are
	 * set up only on receiving the first packet, which would otherwise
	 * hit a NULL REO queue error.
	 */
	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);

	/*
	 * Set up the rest of the TIDs to handle LFR
	 */
	dp_peer_setup_remaining_tids(peer);

	/*
	 * Set security defaults: no PN check, no security. The target may
	 * send a HTT SEC_IND message to overwrite these defaults.
	 */
	peer->security[dp_sec_ucast].sec_type =
		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
}

/*
 * dp_peer_rx_cleanup() - Cleanup receive TID state
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	int tid;
	uint32_t tid_delete_mask = 0;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
			dp_rx_tid_delete_wifi3(peer, tid);

			/* Cleanup defrag related resource */
			dp_rx_defrag_waitlist_remove(peer, tid);
			dp_rx_reorder_flush_frag(peer, tid);

			tid_delete_mask |= (1 << tid);
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
#ifdef notyet /* See if FW can remove queues as part of peer cleanup */
	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			tid_delete_mask);
	}
#endif
	for (tid = 0; tid < DP_MAX_TIDS; tid++)
		qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
}

/*
 * dp_peer_cleanup() - Cleanup peer information
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;

	/* cleanup the Rx reorder queues for this peer */
	dp_peer_rx_cleanup(vdev, peer);
}

/*
 * dp_addba_resp_tx_completion_wifi3() - Update Rx TID state on ADDBA
 * response tx completion
 *
 * @peer: Datapath peer handle
 * @tid: TID number
 * @status: tx completion status
 * Return: 0 on success, error code on failure
 */
int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
	uint8_t tid, int status)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		rx_tid->num_addba_rsp_failed++;
		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx Tid- %d addba rsp tx completion failed!",
			__func__, tid);
		return 0;
	}

	rx_tid->num_addba_rsp_success++;
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
			__func__, tid);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_rx_tid_update_wifi3(peer, tid, rx_tid->ba_win_size,
		rx_tid->startseqnum)) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		return QDF_STATUS_E_FAILURE;
	}
	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
		rx_tid->statuscode = rx_tid->userstatuscode;
	else
		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;

	rx_tid->ba_status = DP_RX_BA_ACTIVE;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
	return 0;
}
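
/*
 * Summary of the BA session states exercised by the handlers in this
 * file (an informational summary derived from the code, not an added
 * guarantee):
 *
 *	DP_RX_BA_INACTIVE    --ADDBA req processed-->  DP_RX_BA_IN_PROGRESS
 *	DP_RX_BA_IN_PROGRESS --ADDBA rsp tx success--> DP_RX_BA_ACTIVE
 *	DP_RX_BA_ACTIVE      --DELBA / rsp tx fail-->  DP_RX_BA_INACTIVE
 *
 * Every failure path also shrinks the TID queue back to a window of 1
 * via dp_rx_tid_update_wifi3(peer, tid, 1, 0).
 */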

/*
 * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
 *
 * @peer: Datapath peer handle
 * @tid: TID number
 * @dialogtoken: output dialogtoken
 * @statuscode: output statuscode
 * @buffersize: Output BA window size
 * @batimeout: Output BA timeout
 */
void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
	uint8_t *dialogtoken, uint16_t *statuscode,
	uint16_t *buffersize, uint16_t *batimeout)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_resp++;
	/* setup ADDBA response parameters */
	*dialogtoken = rx_tid->dialogtoken;
	*statuscode = rx_tid->statuscode;
	*buffersize = rx_tid->ba_win_size;
	*batimeout = 0;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
}

/*
 * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
 *
 * @peer: Datapath peer handle
 * @dialogtoken: dialogtoken from ADDBA frame
 * @tid: TID number
 * @batimeout: BA timeout
 * @buffersize: BA window size
 * @startseqnum: Start seq. number received in BA sequence control
 *
 * Return: 0 on success, error code on failure
 */
int dp_addba_requestprocess_wifi3(void *peer_handle,
	uint8_t dialogtoken,
	uint16_t tid, uint16_t batimeout,
	uint16_t buffersize,
	uint16_t startseqnum)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_req++;
	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
		rx_tid->hw_qdesc_vaddr_unaligned != NULL) ||
		(rx_tid->ba_status == DP_RX_BA_IN_PROGRESS)) {
		dp_rx_tid_update_wifi3(peer, tid, 1, 0);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx Tid- %d hw qdesc is already setup",
			__func__, tid);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_rx_tid_setup_wifi3(peer, tid, 1, 0)) {
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		return QDF_STATUS_E_FAILURE;
	}
	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;

	rx_tid->ba_win_size = buffersize;
	rx_tid->dialogtoken = dialogtoken;
	rx_tid->startseqnum = startseqnum;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
	return 0;
}
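
/*
 * Hypothetical calling sequence from a management-frame handler, to show
 * how the ADDBA entry points in this file compose (the local variable
 * names are illustrative only):
 *
 *	uint8_t tok;
 *	uint16_t status, bufsize, timeout;
 *
 *	dp_addba_requestprocess_wifi3(peer, dialogtoken, tid,
 *			batimeout, buffersize, startseqnum);
 *	dp_addba_responsesetup_wifi3(peer, tid, &tok, &status,
 *			&bufsize, &timeout);
 *	.. transmit the ADDBA response built from tok/status/bufsize ..
 *	dp_addba_resp_tx_completion_wifi3(peer, tid, tx_ok ? 0 : 1);
 *
 * A nonzero completion status tears the session back down, as handled
 * in dp_addba_resp_tx_completion_wifi3() above.
 */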

/*
 * dp_set_addba_response() - Set a user defined ADDBA response status code
 *
 * @peer: Datapath peer handle
 * @tid: TID number
 * @statuscode: response status code to be set
 */
void dp_set_addba_response(void *peer_handle, uint8_t tid,
	uint16_t statuscode)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->userstatuscode = statuscode;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
}

/*
 * dp_delba_process_wifi3() - Process DELBA from peer
 * @peer: Datapath peer handle
 * @tid: TID number
 * @reasoncode: Reason code received in DELBA frame
 *
 * Return: 0 on success, error code on failure
 */
int dp_delba_process_wifi3(void *peer_handle,
	int tid, uint16_t reasoncode)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		return QDF_STATUS_E_FAILURE;
	}
	/* TODO: See if we can delete the existing REO queue descriptor and
	 * replace with a new one without queue extension descriptor to save
	 * memory
	 */
	rx_tid->num_of_delba_req++;
	dp_rx_tid_update_wifi3(peer, tid, 1, 0);

	rx_tid->ba_status = DP_RX_BA_INACTIVE;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
	return 0;
}

/*
 * dp_rx_discard() - Discard a list of msdus from a partly-deleted peer
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 * @tid: TID number
 * @msdu_list: list of msdus to be discarded
 */
void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
	qdf_nbuf_t msdu_list)
{
	while (msdu_list) {
		qdf_nbuf_t msdu = msdu_list;

		msdu_list = qdf_nbuf_next(msdu_list);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			"discard rx %pK from partly-deleted peer %pK "
			"(%02x:%02x:%02x:%02x:%02x:%02x)",
			msdu, peer,
			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
		qdf_nbuf_free(msdu);
	}
}

/**
 * dp_set_pn_check_wifi3() - enable PN check in REO for security
 * @vdev_handle - Datapath vdev handle
 * @peer_handle - Datapath peer handle
 * @sec_type - security type
 * @rx_pn - Receive pn starting number
 *
 */
void
dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle,
	struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
	uint32_t *rx_pn)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	int i;
	uint8_t pn_size;
	struct hal_reo_cmd_params params;

	/* preconditions */
	qdf_assert(vdev);

	pdev = vdev->pdev;
	soc = pdev->soc;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 1;
	params.u.upd_queue_params.update_pn_valid = 1;
	params.u.upd_queue_params.update_pn_size = 1;
	params.u.upd_queue_params.update_pn = 1;
	params.u.upd_queue_params.update_pn_check_needed = 1;
	params.u.upd_queue_params.update_svld = 1;
	params.u.upd_queue_params.svld = 0;

	peer->security[dp_sec_ucast].sec_type = sec_type;

	switch (sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 48;
		pn_size = 48;
		break;
	case cdp_sec_type_wapi:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 128;
		pn_size = 128;
		if (vdev->opmode == wlan_op_mode_ap) {
			params.u.upd_queue_params.pn_even = 1;
			params.u.upd_queue_params.update_pn_even = 1;
		} else {
			params.u.upd_queue_params.pn_uneven = 1;
			params.u.upd_queue_params.update_pn_uneven = 1;
		}
		break;
	default:
		params.u.upd_queue_params.pn_check_needed = 0;
		pn_size = 0;
		break;
	}

	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (sec_type != cdp_sec_type_wapi) {
				params.u.upd_queue_params.update_pn_valid = 0;
			} else {
				/*
				 * Setting PN valid bit for WAPI sec_type,
				 * since WAPI PN has to be started with
				 * predefined value
				 */
				params.u.upd_queue_params.update_pn_valid = 1;
				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
			}
			rx_tid->pn_size = pn_size;
			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
				dp_rx_tid_update_cb, rx_tid);
		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
				"PN Check not setup for TID :%d ", i);
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
}
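
/*
 * Example invocation (illustrative; the real caller lives in the control
 * path). For non-WAPI ciphers the starting PN is not programmed
 * (update_pn_valid is cleared above), so rx_pn may simply be zeroed:
 *
 *	uint32_t rx_pn[4] = { 0 };
 *
 *	dp_set_pn_check_wifi3(vdev_handle, peer_handle,
 *			cdp_sec_type_aes_ccmp, rx_pn);
 *
 * For cdp_sec_type_wapi, rx_pn[0..3] must carry the predefined 128-bit
 * starting PN, since pn_31_0 .. pn_127_96 are programmed from it.
 */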

void
dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
	enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
	u_int32_t *rx_pn)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer;
	int sec_index;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Couldn't find peer from ID %d - skipping security inits",
			peer_id);
		return;
	}
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
		"%s key of type %d",
		peer,
		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
		is_unicast ? "ucast" : "mcast",
		sec_type);
	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;
#ifdef notyet /* TODO: See if this is required for defrag support */
	/* michael key only valid for TKIP, but for simplicity,
	 * copy it anyway
	 */
	qdf_mem_copy(
		&peer->security[sec_index].michael_key[0],
		michael_key,
		sizeof(peer->security[sec_index].michael_key));
#ifdef BIG_ENDIAN_HOST
	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
		sizeof(peer->security[sec_index].michael_key));
#endif /* BIG_ENDIAN_HOST */
#endif

#ifdef notyet /* TODO: Check if this is required for wifi3.0 */
	if (sec_type != htt_sec_type_wapi) {
		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
	} else {
		for (i = 0; i < DP_MAX_TIDS; i++) {
			/*
			 * Setting PN valid bit for WAPI sec_type,
			 * since WAPI PN has to be started with predefined value
			 */
			peer->tids_last_pn_valid[i] = 1;
			qdf_mem_copy(
				(u_int8_t *) &peer->tids_last_pn[i],
				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
			peer->tids_last_pn[i].pn128[1] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
			peer->tids_last_pn[i].pn128[0] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
		}
	}
#endif
	/* TODO: Update HW TID queue with PN check parameters (pn type for
	 * all security types and last pn for WAPI) once REO command API
	 * is available
	 */
}

#ifndef CONFIG_WIN
/**
 * dp_register_peer() - Register peer into physical device
 * @pdev - data path device instance
 * @sta_desc - peer description
 *
 * Register peer into physical device
 *
 * Return: QDF_STATUS_SUCCESS registration success
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
		struct ol_txrx_desc_type *sta_desc)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
			sta_desc->sta_id);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_CONN;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_clear_peer() - remove peer from physical device
 * @pdev - data path device instance
 * @local_id - local peer id
 *
 * remove peer from physical device
 *
 * Return: QDF_STATUS_SUCCESS registration success
 *         QDF_STATUS_E_FAULT peer not found
 */
QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
 * @pdev - data path device instance
 * @vdev - virtual interface instance
 * @peer_addr - peer mac address
 * @local_id - output local peer id for the target mac address
 *
 * Find peer by peer mac address within vdev
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
		struct cdp_vdev *vdev_handle,
		uint8_t *peer_addr, uint8_t *local_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_peer *peer;

	DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
	DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);

	if (!peer)
		return NULL;

	if (peer->vdev != vdev) {
		qdf_atomic_dec(&peer->ref_cnt);
		return NULL;
	}

	*local_id = peer->local_id;
	DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return peer;
}
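
/*
 * A caution for callers of dp_find_peer_by_addr_and_vdev() (an
 * observation on the code above, not an added guarantee): the reference
 * taken by dp_peer_find_hash_find() is dropped before the peer is
 * returned, so the returned pointer is not reference-protected. A
 * sketch of usage under that constraint:
 *
 *	uint8_t local_id;
 *	struct dp_peer *peer;
 *
 *	peer = dp_find_peer_by_addr_and_vdev(pdev_handle, vdev_handle,
 *			mac, &local_id);
 *	if (peer) {
 *		.. use peer only while the caller otherwise guarantees
 *		   that the peer cannot be deleted ..
 *	}
 */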

/**
 * dp_local_peer_id() - Find local peer id within peer instance
 * @peer - peer instance
 *
 * Find local peer id within peer instance
 *
 * Return: local peer id
 */
uint16_t dp_local_peer_id(void *peer)
{
	return ((struct dp_peer *)peer)->local_id;
}

/**
 * dp_peer_find_by_local_id() - Find peer by local peer id
 * @pdev - data path device instance
 * @local_id - local peer id to find
 *
 * Find peer by local peer id within physical device
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
				   "Incorrect local id %u", local_id);
		return NULL;
	}
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
	return peer;
}
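
/*
 * Minimal usage sketch (a hypothetical caller that only has the local
 * id, e.g. from an rx descriptor):
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_find_by_local_id(pdev_handle, sta_id);
 *	if (peer && peer->state == OL_TXRX_PEER_STATE_AUTH)
 *		.. deliver frames ..
 *
 * The map lookup itself runs under local_peer_ids.lock, but no peer
 * reference is taken on behalf of the caller.
 */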

/**
 * dp_peer_state_update() - update peer local state
 * @pdev - data path device instance
 * @peer_mac - peer mac address
 * @state - new peer local state
 *
 * update peer local state
 *
 * Return: QDF_STATUS_SUCCESS registration success
 */
QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
		enum ol_txrx_peer_state state)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	peer = dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
	if (NULL == peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Failed to find peer for: [%pM]", peer_mac);
		return QDF_STATUS_E_FAILURE;
	}
	peer->state = state;

	DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_vdevid() - Get virtual interface id which peer registered
 * @peer - peer instance
 * @vdev_id - virtual interface id which peer registered
 *
 * Get virtual interface id which peer registered
 *
 * Return: QDF_STATUS_SUCCESS registration success
 */
QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
			peer, peer->vdev, peer->vdev->vdev_id);
	*vdev_id = peer->vdev->vdev_id;
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_get_vdev_by_sta_id() - Get vdev by local sta id
 * @pdev_handle - data path device instance
 * @sta_id - local peer id
 *
 * Return: vdev handle on success
 *         NULL if pdev/peer not found
 */
struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
		uint8_t sta_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_peer *peer = NULL;

	if (sta_id >= WLAN_MAX_STA_COUNT) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			"Invalid sta id passed");
		return NULL;
	}

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			"PDEV not found for sta_id [%d]", sta_id);
		return NULL;
	}

	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			"PEER [%d] not found", sta_id);
		return NULL;
	}

	return (struct cdp_vdev *)peer->vdev;
}

/**
 * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
 * @peer - peer instance
 *
 * Get virtual interface instance which peer belongs
 *
 * Return: virtual interface instance pointer
 *         NULL in case cannot find
 */
struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(INFO, "peer %pK vdev %pK", peer, peer->vdev);
	return (struct cdp_vdev *)peer->vdev;
}

/**
 * dp_peer_get_peer_mac_addr() - Get peer mac address
 * @peer - peer instance
 *
 * Get peer mac address
 *
 * Return: peer mac address pointer
 *         NULL in case cannot find
 */
uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;
	uint8_t *mac;

	mac = peer->mac_addr.raw;
	DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return peer->mac_addr.raw;
}

/**
 * dp_get_peer_state() - Get local peer state
 * @peer - peer instance
 *
 * Get local peer state
 *
 * Return: peer status
 */
int dp_get_peer_state(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
	return peer->state;
}

/**
 * dp_get_last_mgmt_timestamp() - get timestamp of last mgmt frame
 * @ppdev: pdev handle
 * @peer_addr: peer mac addr
 * @subtype: management frame type
 * @timestamp: output last timestamp
 *
 * Return: true if timestamp is retrieved for valid peer else false
 */
bool dp_get_last_mgmt_timestamp(struct cdp_pdev *ppdev, u8 *peer_addr,
		u8 subtype, qdf_time_t *timestamp)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	struct dp_soc *soc;
	bool ret = false;
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	soc = pdev->soc;
	qdf_mem_copy(
		&local_mac_addr_aligned.raw[0],
		peer_addr, DP_MAC_ADDR_LEN);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_find_hash_index(soc, mac_addr);

	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peer with same MAC address,
		 * modified find will take care of finding the correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
			(peer->vdev->vdev_id == DP_VDEV_ALL)) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it */
			switch (subtype) {
			case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
				*timestamp = peer->last_assoc_rcvd;
				ret = true;
				break;
			case IEEE80211_FC0_SUBTYPE_DISASSOC:
			case IEEE80211_FC0_SUBTYPE_DEAUTH:
				*timestamp = peer->last_disassoc_rcvd;
				ret = true;
				break;
			default:
				break;
			}
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return ret;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return false; /* failure */
}

/**
 * dp_update_last_mgmt_timestamp() - set timestamp of last mgmt frame
 * @ppdev: pdev handle
 * @peer_addr: peer mac addr
 * @timestamp: time to be set
 * @subtype: management frame type
 *
 * Return: true if timestamp is updated for valid peer else false
 */
bool dp_update_last_mgmt_timestamp(struct cdp_pdev *ppdev, u8 *peer_addr,
		qdf_time_t timestamp, u8 subtype)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	struct dp_soc *soc;
	bool ret = false;
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	soc = pdev->soc;
	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		peer_addr, DP_MAC_ADDR_LEN);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_find_hash_index(soc, mac_addr);

	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peer with same MAC address,
		 * modified find will take care of finding the correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
			(peer->vdev->vdev_id == DP_VDEV_ALL)) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it */
			switch (subtype) {
			case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
				peer->last_assoc_rcvd = timestamp;
				ret = true;
				break;
			case IEEE80211_FC0_SUBTYPE_DISASSOC:
			case IEEE80211_FC0_SUBTYPE_DEAUTH:
				peer->last_disassoc_rcvd = timestamp;
				ret = true;
				break;
			default:
				break;
			}
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return ret;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return false; /* failure */
}
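
/*
 * The get/update pair above is meant to be used together, e.g. to detect
 * a retried (re)assoc frame. A hypothetical caller ('threshold' is an
 * assumed caller-defined value):
 *
 *	qdf_time_t last = 0;
 *	qdf_time_t now = qdf_system_ticks();
 *
 *	if (dp_get_last_mgmt_timestamp(ppdev, mac,
 *			IEEE80211_FC0_SUBTYPE_ASSOC_REQ, &last) &&
 *	    (now - last) < threshold)
 *		return;	.. drop the duplicate ..
 *	dp_update_last_mgmt_timestamp(ppdev, mac, now,
 *			IEEE80211_FC0_SUBTYPE_ASSOC_REQ);
 *
 * Both helpers walk the peer hash under peer_ref_mutex, so they are
 * safe against concurrent peer deletion for the duration of the call.
 */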

/**
 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
 * @pdev - data path device instance
 *
 * local peer id pool alloc for physical device
 *
 * Return: none
 */
void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "Peer pool init");
}
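
/*
 * Worked example of the pool layout (illustrative): with
 * OL_TXRX_NUM_LOCAL_PEER_IDS == 4, the arrays after init are
 *
 *	freelist = 0
 *	pool[]   = { 1, 2, 3, 4, 4 }	pool[i] links to the next free id;
 *					pool[4] == 4 marks the list end
 *	map[]    = { NULL, NULL, NULL, NULL }
 *
 * dp_local_peer_id_alloc() pops the head id and advances freelist to
 * pool[head]; dp_local_peer_id_free() pushes the id back on the head.
 */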

/**
 * dp_local_peer_id_alloc() - allocate local peer id
 * @pdev - data path device instance
 * @peer - new peer instance
 *
 * allocate local peer id
 *
 * Return: none
 */
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
}

/**
 * dp_local_peer_id_free() - remove local peer id
 * @pdev - data path device instance
 * @peer - peer instance should be removed
 *
 * remove local peer id
 *
 * Return: none
 */
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}

	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}
#endif

/**
 * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
 * @soc_handle: DP SOC handle
 * @peer_id: peer_id of the peer
 * @peer_mac: output buffer for the peer mac address
 *
 * Return: vdev_id of the vap
 */
uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
	uint16_t peer_id, uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer;

	peer = dp_peer_find_by_id(soc, peer_id);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		"soc %pK peer_id %d", soc, peer_id);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"peer not found ");
		return CDP_INVALID_VDEV_ID;
	}

	qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
	return peer->vdev->vdev_id;
}

/**
 * dp_peer_rxtid_stats: Retrieve Rx TID (REO queue) stats from HW
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: REO command callback function
 * @cb_ctxt: Callback context
 *
 * Return: none
 */
void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
	void *cb_ctxt)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	int i;

	if (!dp_stats_cmd_cb)
		return;

	qdf_mem_zero(&params, sizeof(params));
	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
			params.std.need_status = 1;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (cb_ctxt) {
				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
					&params, dp_stats_cmd_cb, cb_ctxt);
			} else {
				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
					&params, dp_stats_cmd_cb, rx_tid);
			}

			/* Flush REO descriptor from HW cache to update stats
			 * in descriptor memory. This is to help debugging
			 */
			qdf_mem_zero(&params, sizeof(params));
			params.std.need_status = 0;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			params.u.fl_cache_params.flush_no_inval = 1;
			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
				NULL);
		}
	}
}
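
/*
 * Sketch of a stats callback compatible with the dispatch above. The
 * signature follows dp_reo_send_cmd() callbacks such as
 * dp_rx_tid_delete_cb(); the status member name 'queue_status' is
 * assumed from hal_reo.h, and the body is a stub:
 *
 *	static void example_rxtid_stats_cb(struct dp_soc *soc,
 *			void *cb_ctxt, union hal_reo_status *reo_status)
 *	{
 *		struct hal_reo_queue_status *st =
 *			&reo_status->queue_status;
 *
 *		.. print or accumulate fields of *st for the rx_tid
 *		   (or caller context) passed back in cb_ctxt ..
 *	}
 *
 *	dp_peer_rxtid_stats(peer, example_rxtid_stats_cb, NULL);
 *
 * When cb_ctxt is NULL, the per-TID rx_tid pointer is passed to the
 * callback instead, as implemented above.
 */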

/**
 * dp_set_michael_key() - set michael key
 * @peer_handle - datapath peer handle
 * @is_unicast - true for unicast key, false for multicast key
 * @key - michael key to be set
 *
 * Return: none
 */
void dp_set_michael_key(struct cdp_peer *peer_handle,
		bool is_unicast, uint32_t *key)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	uint8_t sec_index = is_unicast ? 1 : 0;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"peer not found ");
		return;
	}

	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
		     key, IEEE80211_WEP_MICLEN);
}