blob: ffce0982913d08a4bdd619377b4cf10f13c78258 [file] [log] [blame]
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001/*
Gyanranjan Hazarika99a58d32017-12-22 21:56:17 -08002 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
Harilakshmi Deshkumar1ea21092017-05-08 21:16:27 +053016 * PERFORMANCE OF THIS SOFTWARE.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070017 */
18
19#include <qdf_types.h>
20#include <qdf_lock.h>
21#include "dp_htt.h"
22#include "dp_types.h"
23#include "dp_internal.h"
Jeff Johnson2cb8fc72016-12-17 10:45:08 -080024#include "dp_peer.h"
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070025#include <hal_api.h>
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -080026#include <hal_reo.h>
Venkata Sharath Chandra Manchala8e8d8f12017-01-13 00:00:58 -080027#ifdef CONFIG_MCL
28#include <cds_ieee80211_common.h>
Yun Parkfde6b9e2017-06-26 17:13:11 -070029#include <cds_api.h>
Venkata Sharath Chandra Manchala8e8d8f12017-01-13 00:00:58 -080030#endif
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080031#include <cdp_txrx_handle.h>
Ravi Joshiaf9ace82017-02-17 12:41:48 -080032#include <wlan_cfg.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070033
Pramod Simhab17d0672017-03-06 17:20:13 -080034#ifdef DP_LFR
35static inline void
36dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
37 uint8_t valid)
38{
39 params->u.upd_queue_params.update_svld = 1;
40 params->u.upd_queue_params.svld = valid;
41 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
42 "%s: Setting SSN valid bit to %d\n",
43 __func__, valid);
44}
45#else
46static inline void
47dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
48 uint8_t valid) {};
49#endif
50
/*
 * dp_peer_find_mac_addr_cmp() - compare two word-aligned MAC addresses
 * @mac_addr1: first address
 * @mac_addr2: second address
 *
 * Compares the 6-byte addresses as one 32-bit word plus one 16-bit word
 * (the dp_align_mac_addr union's align4 view).
 *
 * Return: 0 when the addresses match, non-zero otherwise
 * (memcmp-style "zero means equal" convention).
 */
static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		/*
		 * Intentionally use & rather than &&.
		 * because the operands are binary rather than generic boolean,
		 * the functionality is equivalent.
		 * Using && has the advantage of short-circuited evaluation,
		 * but using & has the advantage of no conditional branching,
		 * which is a more significant benefit.
		 */
		&
		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}
67
/*
 * dp_peer_find_map_attach() - allocate the peer-ID -> peer-object map
 * @soc: SoC handle
 *
 * Sizes the map from the configured maximum peer ID (+1 because IDs are
 * used as direct array indices) and caches that count in soc->max_peers.
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM if allocation fails
 * (callers treat any non-zero value as failure).
 */
static int dp_peer_find_map_attach(struct dp_soc *soc)
{
	uint32_t max_peers, peer_map_size;

	/* allocate the peer ID -> peer object map */
	max_peers = wlan_cfg_max_peer_id(soc->wlan_cfg_ctx) + 1;
	soc->max_peers = max_peers;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"\n<=== cfg max peer id %d ====>\n", max_peers);
	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!soc->peer_id_to_obj_map) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: peer map memory allocation failed\n", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * The peer_id_to_obj_map doesn't really need to be initialized,
	 * since elements are only used after they have been individually
	 * initialized.
	 * However, it is convenient for debugging to have all elements
	 * that are not in use set to 0.
	 */
	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
	return 0; /* success */
}
95
/*
 * dp_log2_ceil() - compute ceil(log2(value))
 * @value: input value
 *
 * Returns the smallest n such that (1 << n) >= value; used to round
 * hash-table sizes up to a power of two.
 *
 * Fixes vs. original:
 *  - value == 0 left log2 == -1 and then evaluated (1 << -1), a
 *    negative-count shift, which is undefined behavior; now returns 0.
 *  - the shift uses 1U so that log2 == 31 does not overflow signed int.
 *
 * Return: ceil(log2(value)), or 0 for value <= 1
 */
static int dp_log2_ceil(unsigned value)
{
	unsigned tmp = value;
	int log2 = -1;

	/* position of the highest set bit */
	while (tmp) {
		log2++;
		tmp >>= 1;
	}

	/* guard: avoid the undefined (1 << -1) the original hit for 0 */
	if (log2 < 0)
		return 0;

	/* round up when value is not an exact power of two */
	if ((1U << log2) != value)
		log2++;
	return log2;
}
109
/*
 * dp_peer_find_add_id_to_obj() - record a firmware peer ID in a peer object
 * @peer: peer object to update
 * @peer_id: ID assigned by the target for this peer
 *
 * Stores @peer_id in the first free slot (marked HTT_INVALID_PEER) of the
 * peer's fixed-size ID array; a peer may own several IDs.
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE when all slots are in use
 */
static int dp_peer_find_add_id_to_obj(
	struct dp_peer *peer,
	uint16_t peer_id)
{
	int i;

	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
			peer->peer_ids[i] = peer_id;
			return 0; /* success */
		}
	}
	return QDF_STATUS_E_FAILURE; /* failure */
}
124
125#define DP_PEER_HASH_LOAD_MULT 2
126#define DP_PEER_HASH_LOAD_SHIFT 0
127
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530128#define DP_AST_HASH_LOAD_MULT 2
129#define DP_AST_HASH_LOAD_SHIFT 0
130
/*
 * dp_peer_find_hash_attach() - allocate the MAC-address -> peer hash table
 * @soc: SoC handle
 *
 * Sizes the table from the configured peer count, scaled by
 * DP_PEER_HASH_LOAD_MULT/SHIFT and rounded up to a power of two so the
 * index can be computed with a mask instead of a modulo.
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */
static int dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = wlan_cfg_max_peer_id(soc->wlan_cfg_ctx) + 1;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	return 0;
}
155
/*
 * dp_peer_find_hash_detach() - free the peer hash-table bins
 * @soc: SoC handle
 *
 * Assumes all peers have already been removed; only the bin array itself
 * is released.
 *
 * Return: None
 */
static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_hash.bins);
}
160
/*
 * dp_peer_find_hash_index() - compute the peer hash bin for a MAC address
 * @soc: SoC handle
 * @mac_addr: word-aligned MAC address
 *
 * XOR-folds the three 16-bit halves of the address, mixes in the high
 * bits, and masks down to the table size (a power of two).
 *
 * Return: bin index in [0, peer_hash.mask]
 */
static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	unsigned index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->peer_hash.idx_bits;
	index &= soc->peer_hash.mask;
	return index;
}
174
175
/*
 * dp_peer_find_hash_add() - insert a peer into the MAC-address hash table
 * @soc: SoC handle
 * @peer: peer object to insert
 *
 * Takes peer_ref_mutex for the insertion; callers need not hold it.
 *
 * Return: None
 */
void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/*
	 * It is important to add the new peer at the tail of the peer list
	 * with the bin index. Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}
192
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +0530193#ifdef FEATURE_AST
/*
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Sizes the table from WLAN_UMAC_PSOC_MAX_PEERS scaled by
 * DP_AST_HASH_LOAD_MULT/SHIFT and rounded up to a power of two,
 * mirroring dp_peer_find_hash_attach.
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */
static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	hash_elems = ((WLAN_UMAC_PSOC_MAX_PEERS * DP_AST_HASH_LOAD_MULT) >>
		DP_AST_HASH_LOAD_SHIFT);

	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->ast_hash.mask = hash_elems - 1;
	soc->ast_hash.idx_bits = log2;

	/* allocate an array of TAILQ ast entry lists */
	soc->ast_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
				dp_ast_entry)));

	if (!soc->ast_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->ast_hash.bins[i]);

	return 0;
}
226
/*
 * dp_peer_ast_hash_detach() - Free AST Hash table
 * @soc: SoC handle
 *
 * Frees only the bin array; assumes all AST entries were removed first.
 *
 * Return: None
 */
static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->ast_hash.bins);
}
237
/*
 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
 * @soc: SoC handle
 * @mac_addr: word-aligned MAC address
 *
 * Same XOR-fold scheme as dp_peer_find_hash_index, but over the AST
 * table's idx_bits/mask.
 *
 * Return: AST hash bin index in [0, ast_hash.mask]
 */
static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->ast_hash.idx_bits;
	index &= soc->ast_hash.mask;
	return index;
}
257
/*
 * dp_peer_ast_hash_add() - Add AST entry into hash table
 * @soc: SoC handle
 * @ase: AST entry to insert
 *
 * This function adds the AST entry into SoC AST hash table
 * It assumes caller has taken the ast lock to protect the access to this table
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
		struct dp_ast_entry *ase)
{
	uint32_t index;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
}
275
/*
 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
 * @soc: SoC handle
 * @ase: AST entry to remove
 *
 * This function removes the AST entry from soc AST hash table
 * It assumes caller has taken the ast lock to protect the access to this table
 *
 * The membership scan plus QDF_ASSERT is a debug aid: removal of an
 * entry that is not actually in its bin would corrupt the list.
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
		struct dp_ast_entry *ase)
{
	unsigned index;
	struct dp_ast_entry *tmpase;
	int found = 0;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	/* Check if tail is not empty before delete*/
	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));

	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (tmpase == ase) {
			found = 1;
			break;
		}
	}

	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
}
306
/*
 * dp_peer_ast_hash_find() - Find AST entry by MAC address
 * @soc: SoC handle
 * @ast_mac_addr: MAC address to look up (need not be word-aligned; it is
 *                copied into an aligned local before hashing/comparing)
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: matching AST entry, or NULL when no entry exists
 */
struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
						uint8_t *ast_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
			ast_mac_addr, DP_MAC_ADDR_LEN);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		/* dp_peer_find_mac_addr_cmp returns 0 on a match */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
			return ase;
		}
	}

	return NULL;
}
336
/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 *
 * Scans @peer's AST list for @mac_addr; on a hit, records the
 * firmware-assigned @hw_peer_id in the entry and in the global
 * soc->ast_table, then (also for proxy-STA vdevs without a hit)
 * forwards the mapping to the control path via peer_map_event.
 *
 * NOTE(review): hw_peer_id is used to index soc->ast_table without a
 * bounds check — presumably guaranteed in range by the target; confirm.
 *
 * Return: None
 */
static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	struct dp_ast_entry *ast_entry;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
	bool ast_entry_found = FALSE;

	if (!peer) {
		return;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n",
		__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
		mac_addr[1], mac_addr[2], mac_addr[3],
		mac_addr[4], mac_addr[5]);

	qdf_spin_lock_bh(&soc->ast_lock);
	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
		/* qdf_mem_cmp returns 0 on equality */
		if (!(qdf_mem_cmp(mac_addr, ast_entry->mac_addr.raw,
				DP_MAC_ADDR_LEN))) {
			ast_entry->ast_idx = hw_peer_id;
			soc->ast_table[hw_peer_id] = ast_entry;
			ast_entry->is_active = TRUE;
			peer_type = ast_entry->type;
			ast_entry_found = TRUE;
		}
	}

	/* proxy-STA vdevs are notified even without a matching AST entry */
	if (ast_entry_found || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
			soc->ctrl_psoc, peer->peer_ids[0],
			hw_peer_id, vdev_id,
			mac_addr, peer_type);
		}
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"AST entry not found\n");
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return;
}
392
/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: entry type (static / WDS / WDS-HM / MEC)
 * @flags: flags passed through to the control-path WDS-entry callback
 *
 * This API is used by WDS source port learning funtion to
 * add a new AST entry into peer AST list
 *
 * If an entry for @mac_addr already exists, it is refreshed (MEC entries
 * are re-marked active) and the call succeeds without allocating.
 *
 * Return: 0 if new entry is allocated,
 *         -1 if entry add failed
 */
int dp_peer_add_ast(struct dp_soc *soc,
	struct dp_peer *peer,
	uint8_t *mac_addr,
	enum cdp_txrx_ast_entry_type type,
	uint32_t flags)
{
	struct dp_ast_entry *ast_entry;
	struct dp_vdev *vdev = peer->vdev;
	uint8_t next_node_mac[6];
	int ret = -1;

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("Peers vdev is NULL"));
		QDF_ASSERT(0);
		return ret;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x\n",
		__func__, peer, mac_addr[0], mac_addr[1], mac_addr[2],
		mac_addr[3], mac_addr[4], mac_addr[5]);

	qdf_spin_lock_bh(&soc->ast_lock);

	/* If AST entry already exists , just return from here */
	ast_entry = dp_peer_ast_hash_find(soc, mac_addr);

	if (ast_entry) {
		if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
			ast_entry->is_active = TRUE;

		qdf_spin_unlock_bh(&soc->ast_lock);
		return 0;
	}

	ast_entry = (struct dp_ast_entry *)
			qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("fail to allocate ast_entry"));
		QDF_ASSERT(0);
		return ret;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
	ast_entry->peer = peer;
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->vdev_id = vdev->vdev_id;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		/* base entry carrying the peer's own MAC address */
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
		break;
	case CDP_TXRX_AST_TYPE_MEC:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
		break;
	default:
		/* NOTE(review): ast_entry->type is not set here and the
		 * entry is still inserted — presumably qdf_mem_malloc
		 * zero-fills, leaving type 0; confirm intended behavior.
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Incorrect AST entry type"));
	}

	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
	DP_STATS_INC(soc, ast.added, 1);
	dp_peer_ast_hash_add(soc, ast_entry);
	qdf_spin_unlock_bh(&soc->ast_lock);

	/* MEC frames are bridged back via the vdev's own MAC; for WDS the
	 * next hop is the peer itself.
	 * NOTE(review): ast_entry is dereferenced below after the lock is
	 * dropped — a concurrent dp_peer_del_ast could free it; confirm
	 * callers serialize add/del for the same address.
	 */
	if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)
		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
	else
		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);

	if (ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) {
		if (QDF_STATUS_SUCCESS ==
			soc->cdp_soc.ol_ops->peer_add_wds_entry(
					peer->vdev->osif_vdev,
					mac_addr,
					next_node_mac,
					flags))
			return 0;
	}

	return ret;
}
503
/*
 * dp_peer_del_ast() - Delete and free AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function removes the AST entry from peer and soc tables
 * It assumes caller has taken the ast lock to protect the access to these
 * tables
 *
 * NOTE(review): ol_ops->peer_del_wds_entry is invoked without a NULL
 * check, unlike peer_map_event above — presumably always registered
 * when FEATURE_AST builds run; confirm.
 *
 * Return: None
 */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = ast_entry->peer;

	/* learned (next-hop) entries also exist in the control path */
	if (ast_entry->next_hop)
		soc->cdp_soc.ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
						ast_entry->mac_addr.raw);

	soc->ast_table[ast_entry->ast_idx] = NULL;
	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);
	qdf_mem_free(ast_entry);
}
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530529
/*
 * dp_peer_update_ast() - Update AST entry to point at a (roamed) peer
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @ast_entry: AST entry of the node
 * @flags: wds or hmwds
 *
 * This function update the AST entry to the roamed peer and soc tables
 * It takes the ast lock itself (unlike dp_peer_del_ast).
 *
 * Return: 0 if ast entry is updated successfully
 *         -1 failure
 */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
			struct dp_ast_entry *ast_entry, uint32_t flags)
{
	int ret = -1;
	struct dp_peer *old_peer;

	qdf_spin_lock_bh(&soc->ast_lock);

	/* detach the entry from the peer it was originally learned on */
	old_peer = ast_entry->peer;
	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);

	ast_entry->peer = peer;
	/* NOTE(review): type is unconditionally forced to WDS, which makes
	 * the STATIC check below always true — presumably roamed entries
	 * are always WDS; confirm.
	 */
	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
	ast_entry->vdev_id = peer->vdev->vdev_id;
	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

	if (ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) {
		if (QDF_STATUS_SUCCESS ==
			soc->cdp_soc.ol_ops->peer_update_wds_entry(
				peer->vdev->osif_vdev,
				ast_entry->mac_addr.raw,
				peer->mac_addr.raw,
				flags)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return 0;
		}
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	return ret;
}
578
/*
 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
 * @soc: SoC handle (unused; kept for API symmetry with the stub build)
 * @ast_entry: AST entry of the node
 *
 * This function gets the pdev_id from the ast entry.
 *
 * Return: (uint8_t) pdev_id
 */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->pdev_id;
}
593
/*
 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
 * @soc: SoC handle (unused; kept for API symmetry with the stub build)
 * @ast_entry: AST entry of the node
 *
 * This function gets the next hop from the ast entry.
 *
 * Return: (uint8_t) next_hop — 1 for learned WDS/MEC entries
 */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->next_hop;
}
608
/*
 * dp_peer_ast_set_type() - set type in the ast entry
 * @soc: SoC handle (unused; kept for API symmetry with the stub build)
 * @ast_entry: AST entry of the node
 * @type: entry type to store
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
 */
void dp_peer_ast_set_type(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry,
				enum cdp_txrx_ast_entry_type type)
{
	ast_entry->type = type;
}
624
#else
/*
 * FEATURE_AST disabled: no-op stand-ins so callers compile unchanged.
 * NOTE(review): the int-returning stubs return 1 (failure in the
 * callers' 0-on-success convention) — presumably intentional, since no
 * AST entry can exist in this build; confirm callers tolerate it.
 */
int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		uint32_t flags)
{
	return 1;
}

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
}

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
			struct dp_ast_entry *ast_entry, uint32_t flags)
{
	return 1;
}

/* lookups always miss when AST support is compiled out */
struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc,
						uint8_t *ast_mac_addr)
{
	return NULL;
}

/* nothing to allocate; report success so dp_peer_find_attach proceeds */
static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	return 0;
}

static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	return;
}

static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
}

void dp_peer_ast_set_type(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry,
				enum cdp_txrx_ast_entry_type type)
{
}

/* 0xff doubles as an "invalid id" sentinel for both getters below */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}


uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
#endif
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530684
/*
 * dp_peer_find_hash_find() - look up a peer by MAC address
 * @soc: SoC handle
 * @peer_mac_addr: MAC address to search for
 * @mac_addr_is_aligned: non-zero if @peer_mac_addr is already word-aligned
 *                       (avoids the local copy)
 * @vdev_id: vdev to match when ProxySTA (WRAP) support is compiled in
 *
 * On success the returned peer's reference count has been incremented
 * under peer_ref_mutex; the caller owns that reference and must release
 * it with dp_peer_unref_delete.
 *
 * Return: peer object with one reference held, or NULL if not found
 */
struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_peer *peer;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, DP_MAC_ADDR_LEN);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peer with same MAC address,
		 * modified find will take care of finding the correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
			(peer->vdev->vdev_id == vdev_id)) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it - increment the ref count before releasing
			 * the lock
			 */
			qdf_atomic_inc(&peer->ref_cnt);
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return NULL; /* failure */
}
723
/*
 * dp_peer_find_hash_remove() - unlink a peer from the MAC hash table
 * @soc: SoC handle
 * @peer: peer to unlink
 *
 * Caller MUST hold peer_ref_mutex (see the comment below for why the
 * lock scope must extend beyond this function).
 *
 * Return: None
 */
void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	/* Check if tail is not empty before delete*/
	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
	/*
	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
	 * by the caller.
	 * The caller needs to hold the lock from the time the peer object's
	 * reference count is decremented and tested up through the time the
	 * reference to the peer object is removed from the hash table, by
	 * this function.
	 * Holding the lock only while removing the peer object reference
	 * from the hash table keeps the hash table consistent, but does not
	 * protect against a new HL tx context starting to use the peer object
	 * if it looks up the peer object from its MAC address just after the
	 * peer ref count is decremented to zero, but just before the peer
	 * object reference is removed from the hash table.
	 */
	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}
	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
}
756
/*
 * dp_peer_find_hash_erase() - tear down every peer still in the hash table
 * @soc: SoC handle (must no longer be in active use)
 *
 * Walks every bin and forces each remaining peer's refcount to exactly
 * one so dp_peer_unref_delete frees it. Used only at soc detach time.
 *
 * Return: None
 */
void dp_peer_find_hash_erase(struct dp_soc *soc)
{
	int i;

	/*
	 * Not really necessary to take peer_ref_mutex lock - by this point,
	 * it's known that the soc is no longer in use.
	 */
	for (i = 0; i <= soc->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
			struct dp_peer *peer, *peer_next;

			/*
			 * TAILQ_FOREACH_SAFE must be used here to avoid any
			 * memory access violation after peer is freed
			 */
			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
				hash_list_elem, peer_next) {
				/*
				 * Don't remove the peer from the hash table -
				 * that would modify the list we are currently
				 * traversing, and it's not necessary anyway.
				 */
				/*
				 * Artificially adjust the peer's ref count to
				 * 1, so it will get deleted by
				 * dp_peer_unref_delete.
				 */
				/* set to zero */
				qdf_atomic_init(&peer->ref_cnt);
				/* incr to one */
				qdf_atomic_inc(&peer->ref_cnt);
				dp_peer_unref_delete(peer);
			}
		}
	}
}
794
/*
 * dp_peer_find_map_detach() - free the peer-ID -> peer-object map
 * @soc: SoC handle
 *
 * Counterpart of dp_peer_find_map_attach.
 *
 * Return: None
 */
static void dp_peer_find_map_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_id_to_obj_map);
}
799
/*
 * dp_peer_find_attach() - set up all peer lookup structures
 * @soc: SoC handle
 *
 * Allocates the peer-ID map, the peer MAC hash table, and the AST hash
 * table, unwinding already-allocated structures on any failure.
 *
 * Return: 0 on success, 1 on any allocation failure
 */
int dp_peer_find_attach(struct dp_soc *soc)
{
	if (dp_peer_find_map_attach(soc))
		return 1;

	if (dp_peer_find_hash_attach(soc)) {
		dp_peer_find_map_detach(soc);
		return 1;
	}

	if (dp_peer_ast_hash_attach(soc)) {
		dp_peer_find_hash_detach(soc);
		dp_peer_find_map_detach(soc);
		return 1;
	}
	return 0; /* success */
}
817
/*
 * dp_rx_tid_stats_cb() - REO queue-status command completion callback
 * @soc: SoC handle
 * @cb_ctxt: opaque callback context; actually the struct dp_rx_tid * the
 *           stats were requested for
 * @reo_status: union carrying the queue_status returned by REO hardware
 *
 * Dumps the REO RX reorder-queue statistics (sequence state, PN, bitmap,
 * frame/byte counters) plus the host-side ADDBA/DELBA counters for the
 * TID. Output-only; no state is modified.
 *
 * Return: None
 */
void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	/* bail out early if the REO command itself failed */
	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
			queue_status->header.status, rx_tid->tid);
		return;
	}

	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n"
		"ssn: %d\n"
		"curr_idx  : %d\n"
		"pn_31_0   : %08x\n"
		"pn_63_32  : %08x\n"
		"pn_95_64  : %08x\n"
		"pn_127_96 : %08x\n"
		"last_rx_enq_tstamp : %08x\n"
		"last_rx_deq_tstamp : %08x\n"
		"rx_bitmap_31_0     : %08x\n"
		"rx_bitmap_63_32    : %08x\n"
		"rx_bitmap_95_64    : %08x\n"
		"rx_bitmap_127_96   : %08x\n"
		"rx_bitmap_159_128  : %08x\n"
		"rx_bitmap_191_160  : %08x\n"
		"rx_bitmap_223_192  : %08x\n"
		"rx_bitmap_255_224  : %08x\n",
		rx_tid->tid,
		queue_status->ssn, queue_status->curr_idx,
		queue_status->pn_31_0, queue_status->pn_63_32,
		queue_status->pn_95_64, queue_status->pn_127_96,
		queue_status->last_rx_enq_tstamp,
		queue_status->last_rx_deq_tstamp,
		queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32,
		queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96,
		queue_status->rx_bitmap_159_128,
		queue_status->rx_bitmap_191_160,
		queue_status->rx_bitmap_223_192,
		queue_status->rx_bitmap_255_224);

	DP_TRACE_STATS(FATAL,
		"curr_mpdu_cnt      : %d\n"
		"curr_msdu_cnt      : %d\n"
		"fwd_timeout_cnt    : %d\n"
		"fwd_bar_cnt        : %d\n"
		"dup_cnt            : %d\n"
		"frms_in_order_cnt  : %d\n"
		"bar_rcvd_cnt       : %d\n"
		"mpdu_frms_cnt      : %d\n"
		"msdu_frms_cnt      : %d\n"
		"total_byte_cnt     : %d\n"
		"late_recv_mpdu_cnt : %d\n"
		"win_jump_2k        : %d\n"
		"hole_cnt           : %d\n",
		queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
		queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
		queue_status->dup_cnt, queue_status->frms_in_order_cnt,
		queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
		queue_status->msdu_frms_cnt, queue_status->total_cnt,
		queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k,
		queue_status->hole_cnt);

	DP_PRINT_STATS("Num of Addba Req = %d\n", rx_tid->num_of_addba_req);
	DP_PRINT_STATS("Num of Addba Resp = %d\n", rx_tid->num_of_addba_resp);
	DP_PRINT_STATS("Num of Delba Req = %d\n", rx_tid->num_of_delba_req);
	DP_PRINT_STATS("BA window size = %d\n", rx_tid->ba_win_size);
	DP_PRINT_STATS("Pn size = %d\n", rx_tid->pn_size);
}
888
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530889static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530890 uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
891 uint8_t vdev_id)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700892{
893 struct dp_peer *peer;
894
895 QDF_ASSERT(peer_id <= wlan_cfg_max_peer_id(soc->wlan_cfg_ctx) + 1);
896 /* check if there's already a peer object with this MAC address */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700897 peer = dp_peer_find_hash_find(soc, peer_mac_addr,
898 0 /* is aligned */, vdev_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700899 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -0700900 "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n",
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700901 __func__, peer, peer_id, vdev_id, peer_mac_addr[0],
902 peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
903 peer_mac_addr[4], peer_mac_addr[5]);
904
905 if (peer) {
906 /* peer's ref count was already incremented by
907 * peer_find_hash_find
908 */
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -0800909 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
910 "%s: ref_cnt: %d", __func__,
911 qdf_atomic_read(&peer->ref_cnt));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700912 soc->peer_id_to_obj_map[peer_id] = peer;
913
914 if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
915 /* TBDXXX: assert for now */
916 QDF_ASSERT(0);
917 }
918
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530919 return peer;
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530920 }
921
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530922 return NULL;
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530923}
924
925/**
926 * dp_rx_peer_map_handler() - handle peer map event from firmware
927 * @soc_handle - genereic soc handle
928 * @peeri_id - peer_id from firmware
929 * @hw_peer_id - ast index for this peer
930 * vdev_id - vdev ID
931 * peer_mac_addr - macc assress of the peer
932 *
933 * associate the peer_id that firmware provided with peer entry
934 * and update the ast table in the host with the hw_peer_id.
935 *
936 * Return: none
937 */
938
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700939void
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530940dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
941 uint8_t vdev_id, uint8_t *peer_mac_addr)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700942{
943 struct dp_soc *soc = (struct dp_soc *)soc_handle;
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530944 struct dp_peer *peer = NULL;
945
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700946 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -0700947 "peer_map_event (soc:%pK): peer_id %di, hw_peer_id %d, peer_mac "
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700948 "%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d\n", soc, peer_id,
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530949 hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
950 peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
951 peer_mac_addr[5], vdev_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700952
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530953 peer = soc->peer_id_to_obj_map[peer_id];
954
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +0530955 if ((hw_peer_id < 0) || (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530956 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
957 "invalid hw_peer_id: %d", hw_peer_id);
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +0530958 qdf_assert_always(0);
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +0530959 }
960
961 /*
962 * check if peer already exists for this peer_id, if so
963 * this peer map event is in response for a wds peer add
964 * wmi command sent during wds source port learning.
965 * in this case just add the ast entry to the existing
966 * peer ast_list.
967 */
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530968 if (!peer)
969 peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
970 hw_peer_id, vdev_id);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +0530971
sumedh baikady68450ab2018-03-23 18:36:29 -0700972 if (peer) {
973 qdf_assert_always(peer->vdev);
974 /*
975 * For every peer MAp message search and set if bss_peer
976 */
977 if (!(qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
978 DP_MAC_ADDR_LEN))) {
979 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Anish Nataraj0dae6762018-03-02 22:31:45 +0530980 "vdev bss_peer!!!!");
sumedh baikady68450ab2018-03-23 18:36:29 -0700981 peer->bss_peer = 1;
982 peer->vdev->vap_bss_peer = peer;
983 }
Anish Nataraj0dae6762018-03-02 22:31:45 +0530984 }
985
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +0530986 dp_peer_map_ast(soc, peer, peer_mac_addr,
987 hw_peer_id, vdev_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700988}
989
990void
991dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
992{
993 struct dp_peer *peer;
994 struct dp_soc *soc = (struct dp_soc *)soc_handle;
995 uint8_t i;
Chaithanya Garrepalli974da262018-02-22 20:32:19 +0530996
997 peer = __dp_peer_find_by_id(soc, peer_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700998
999 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001000 "peer_unmap_event (soc:%pK) peer_id %d peer %pK\n",
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001001 soc, peer_id, peer);
1002
1003 /*
1004 * Currently peer IDs are assigned for vdevs as well as peers.
1005 * If the peer ID is for a vdev, then the peer pointer stored
1006 * in peer_id_to_obj_map will be NULL.
1007 */
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301008 if (!peer) {
1009 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1010 "%s: Received unmap event for invalid peer_id"
1011 " %u\n", __func__, peer_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001012 return;
Chaithanya Garrepalli974da262018-02-22 20:32:19 +05301013 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001014
1015 soc->peer_id_to_obj_map[peer_id] = NULL;
1016 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
1017 if (peer->peer_ids[i] == peer_id) {
1018 peer->peer_ids[i] = HTT_INVALID_PEER;
1019 break;
1020 }
1021 }
1022
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05301023 if (soc->cdp_soc.ol_ops->peer_unmap_event) {
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05301024 soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05301025 peer_id);
1026 }
1027
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001028 /*
1029 * Remove a reference to the peer.
1030 * If there are no more references, delete the peer object.
1031 */
1032 dp_peer_unref_delete(peer);
1033}
1034
/*
 * dp_peer_find_detach() - tear down the peer lookup structures
 * @soc: SOC handle
 *
 * Releases the peer_id->object map, the MAC-address hash table and the
 * AST hash table set up at attach time, in that order.
 */
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
	dp_peer_ast_hash_detach(soc);
}
1042
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001043static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
1044 union hal_reo_status *reo_status)
1045{
1046 struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001047
Karunakar Dasineni31b98d42018-02-27 23:05:08 -08001048 if ((reo_status->rx_queue_status.header.status !=
1049 HAL_REO_CMD_SUCCESS) &&
1050 (reo_status->rx_queue_status.header.status !=
1051 HAL_REO_CMD_DRAIN)) {
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001052 /* Should not happen normally. Just print error for now */
1053 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1054 "%s: Rx tid HW desc update failed(%d): tid %d\n",
1055 __func__,
1056 reo_status->rx_queue_status.header.status,
1057 rx_tid->tid);
1058 }
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001059}
1060
/*
 * dp_find_peer_by_addr - find peer instance by mac address
 * @dev: physical device instance
 * @peer_mac_addr: peer mac address
 * @local_id: out-parameter filled with the peer's local id
 *
 * Return: peer instance pointer, or NULL if no peer has this MAC
 */
void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
		uint8_t *local_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_peer *peer;

	/* Hash lookup takes a reference on the peer when it finds one */
	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, 0);

	if (!peer)
		return NULL;

	/* Multiple peer ids? How can know peer id? */
	*local_id = peer->local_id;
	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 * NOTE(review): the pointer is returned WITHOUT a reference held,
	 * so a concurrent delete could free it while the caller uses it -
	 * confirm callers serialize against peer deletion.
	 */
	qdf_atomic_dec(&peer->ref_cnt);

	return peer;
}
1091
1092/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001093 * dp_rx_tid_update_wifi3() – Update receive TID state
1094 * @peer: Datapath peer handle
1095 * @tid: TID
1096 * @ba_window_size: BlockAck window size
1097 * @start_seq: Starting sequence number
1098 *
1099 * Return: 0 on success, error code on failure
1100 */
Jeff Johnson416168b2017-01-06 09:42:43 -08001101static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
1102 ba_window_size, uint32_t start_seq)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001103{
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001104 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1105 struct dp_soc *soc = peer->vdev->pdev->soc;
1106 struct hal_reo_cmd_params params;
1107
1108 qdf_mem_zero(&params, sizeof(params));
1109
1110 params.std.need_status = 1;
1111 params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
1112 params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1113 params.u.upd_queue_params.update_ba_window_size = 1;
1114 params.u.upd_queue_params.ba_window_size = ba_window_size;
1115
1116 if (start_seq < IEEE80211_SEQ_MAX) {
1117 params.u.upd_queue_params.update_ssn = 1;
1118 params.u.upd_queue_params.ssn = start_seq;
1119 }
1120
Pramod Simhab17d0672017-03-06 17:20:13 -08001121 dp_set_ssn_valid_flag(&params, 0);
1122
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001123 dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, dp_rx_tid_update_cb, rx_tid);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001124 return 0;
1125}
1126
1127/*
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001128 * dp_reo_desc_free() - Callback free reo descriptor memory after
1129 * HW cache flush
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001130 *
1131 * @soc: DP SOC handle
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001132 * @cb_ctxt: Callback context
1133 * @reo_status: REO command status
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001134 */
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001135static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
1136 union hal_reo_status *reo_status)
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001137{
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001138 struct reo_desc_list_node *freedesc =
1139 (struct reo_desc_list_node *)cb_ctxt;
1140 struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001141
Karunakar Dasineni31b98d42018-02-27 23:05:08 -08001142 if ((reo_status->fl_cache_status.header.status !=
1143 HAL_REO_CMD_SUCCESS) &&
1144 (reo_status->fl_cache_status.header.status !=
1145 HAL_REO_CMD_DRAIN)) {
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001146 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1147 "%s: Rx tid HW desc flush failed(%d): tid %d\n",
1148 __func__,
1149 reo_status->rx_queue_status.header.status,
1150 freedesc->rx_tid.tid);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001151 }
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001152 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
1153 "%s: hw_qdesc_paddr: %pK, tid:%d\n", __func__,
1154 (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
1155 qdf_mem_unmap_nbytes_single(soc->osdev,
1156 rx_tid->hw_qdesc_paddr,
1157 QDF_DMA_BIDIRECTIONAL,
1158 rx_tid->hw_qdesc_alloc_size);
1159 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
1160 qdf_mem_free(freedesc);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001161}
1162
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001163#if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1164/* Hawkeye emulation requires bus address to be >= 0x50000000 */
1165static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1166{
1167 if (dma_addr < 0x50000000)
1168 return QDF_STATUS_E_FAILURE;
1169 else
1170 return QDF_STATUS_SUCCESS;
1171}
1172#else
1173static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
1174{
1175 return QDF_STATUS_SUCCESS;
1176}
1177#endif
1178
1179
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001180/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001181 * dp_rx_tid_setup_wifi3() – Setup receive TID state
1182 * @peer: Datapath peer handle
1183 * @tid: TID
1184 * @ba_window_size: BlockAck window size
1185 * @start_seq: Starting sequence number
1186 *
1187 * Return: 0 on success, error code on failure
1188 */
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001189int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001190 uint32_t ba_window_size, uint32_t start_seq)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001191{
1192 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1193 struct dp_vdev *vdev = peer->vdev;
1194 struct dp_soc *soc = vdev->pdev->soc;
1195 uint32_t hw_qdesc_size;
1196 uint32_t hw_qdesc_align;
1197 int hal_pn_type;
1198 void *hw_qdesc_vaddr;
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001199 uint32_t alloc_tries = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001200
Karunakar Dasineni372647d2018-01-15 22:27:39 -08001201 if (peer->delete_in_progress)
1202 return QDF_STATUS_E_FAILURE;
1203
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001204 rx_tid->ba_win_size = ba_window_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001205 if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
1206 return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
1207 start_seq);
sumedh baikadye3947bd2017-11-29 19:19:25 -08001208 rx_tid->num_of_addba_req = 0;
1209 rx_tid->num_of_delba_req = 0;
1210 rx_tid->num_of_addba_resp = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001211#ifdef notyet
1212 hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
1213#else
1214 /* TODO: Allocating HW queue descriptors based on max BA window size
1215 * for all QOS TIDs so that same descriptor can be used later when
1216 * ADDBA request is recevied. This should be changed to allocate HW
1217 * queue descriptors based on BA window size being negotiated (0 for
1218 * non BA cases), and reallocate when BA window size changes and also
1219 * send WMI message to FW to change the REO queue descriptor in Rx
1220 * peer entry as part of dp_rx_tid_update.
1221 */
1222 if (tid != DP_NON_QOS_TID)
1223 hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1224 HAL_RX_MAX_BA_WINDOW);
1225 else
1226 hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
1227 ba_window_size);
1228#endif
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08001229
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001230 hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
1231 /* To avoid unnecessary extra allocation for alignment, try allocating
1232 * exact size and see if we already have aligned address.
1233 */
1234 rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001235
1236try_desc_alloc:
1237 rx_tid->hw_qdesc_vaddr_unaligned =
1238 qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001239
1240 if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1241 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1242 "%s: Rx tid HW desc alloc failed: tid %d\n",
1243 __func__, tid);
1244 return QDF_STATUS_E_NOMEM;
1245 }
1246
1247 if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
1248 hw_qdesc_align) {
1249 /* Address allocated above is not alinged. Allocate extra
1250 * memory for alignment
1251 */
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001252 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001253 rx_tid->hw_qdesc_vaddr_unaligned =
Pramod Simha6b23f752017-03-30 11:54:18 -07001254 qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
1255 hw_qdesc_align - 1);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001256
1257 if (!rx_tid->hw_qdesc_vaddr_unaligned) {
1258 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1259 "%s: Rx tid HW desc alloc failed: tid %d\n",
1260 __func__, tid);
1261 return QDF_STATUS_E_NOMEM;
1262 }
1263
Pramod Simha6b23f752017-03-30 11:54:18 -07001264 hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
1265 rx_tid->hw_qdesc_vaddr_unaligned,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001266 hw_qdesc_align);
Pramod Simha6b23f752017-03-30 11:54:18 -07001267
1268 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001269 "%s: Total Size %d Aligned Addr %pK\n",
Pramod Simha6b23f752017-03-30 11:54:18 -07001270 __func__, rx_tid->hw_qdesc_alloc_size,
1271 hw_qdesc_vaddr);
1272
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001273 } else {
1274 hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001275 }
1276
1277 /* TODO: Ensure that sec_type is set before ADDBA is received.
1278 * Currently this is set based on htt indication
1279 * HTT_T2H_MSG_TYPE_SEC_IND from target
1280 */
1281 switch (peer->security[dp_sec_ucast].sec_type) {
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05301282 case cdp_sec_type_tkip_nomic:
1283 case cdp_sec_type_aes_ccmp:
1284 case cdp_sec_type_aes_ccmp_256:
1285 case cdp_sec_type_aes_gcmp:
1286 case cdp_sec_type_aes_gcmp_256:
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001287 hal_pn_type = HAL_PN_WPA;
1288 break;
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05301289 case cdp_sec_type_wapi:
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001290 if (vdev->opmode == wlan_op_mode_ap)
1291 hal_pn_type = HAL_PN_WAPI_EVEN;
1292 else
1293 hal_pn_type = HAL_PN_WAPI_UNEVEN;
1294 break;
1295 default:
1296 hal_pn_type = HAL_PN_NONE;
1297 break;
1298 }
1299
1300 hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
1301 hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
1302
Pramod Simha6b23f752017-03-30 11:54:18 -07001303 qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001304 QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
Pramod Simha6b23f752017-03-30 11:54:18 -07001305 &(rx_tid->hw_qdesc_paddr));
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001306
Pramod Simha6b23f752017-03-30 11:54:18 -07001307 if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001308 QDF_STATUS_SUCCESS) {
1309 if (alloc_tries++ < 10)
1310 goto try_desc_alloc;
1311 else {
1312 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1313 "%s: Rx tid HW desc alloc failed (lowmem): tid %d\n",
1314 __func__, tid);
1315 return QDF_STATUS_E_NOMEM;
1316 }
1317 }
1318
Leo Chang5ea93a42016-11-03 12:39:49 -07001319 if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
Manoj Ekbote1f2c0b52017-02-11 23:24:43 -08001320 soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
1321 vdev->pdev->osif_pdev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001322 peer->vdev->vdev_id, peer->mac_addr.raw,
1323 rx_tid->hw_qdesc_paddr, tid, tid);
Dhanashri Atre8abb9ee2016-11-22 17:41:02 -08001324
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001325 }
1326 return 0;
1327}
1328
/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (ie., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context - the reo_desc_list_node owning the queue
 * @reo_status: REO command status
 *
 * The descriptor is not freed immediately: it is parked on a deferred
 * freelist and only flushed from the HW cache (and then freed via
 * dp_reo_desc_free()) once the list grows large enough or the entry has
 * aged past the defer timeout.
 */
static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	uint32_t list_size;
	struct reo_desc_list_node *desc;
	unsigned long curr_ts = qdf_get_system_timestamp();
	uint32_t desc_size, tot_desc_size;
	struct hal_reo_cmd_params params;

	/* DRAIN means the soc is going down: skip the HW flush and free
	 * the descriptor right away, propagating DRAIN to the free path.
	 */
	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
		qdf_mem_zero(reo_status, sizeof(*reo_status));
		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
		return;
	} else if (reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_SUCCESS) {
		/* Should not happen normally. Just print error for now */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Rx tid HW desc deletion failed(%d): tid %d\n",
			__func__,
			reo_status->rx_queue_status.header.status,
			freedesc->rx_tid.tid);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		"%s: rx_tid: %d status: %d\n", __func__,
		freedesc->rx_tid.tid,
		reo_status->rx_queue_status.header.status);

	/* Park the descriptor on the deferred freelist, then drain every
	 * entry that is old enough or overflows the list size budget.
	 */
	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	freedesc->free_ts = curr_ts;
	qdf_list_insert_back_size(&soc->reo_desc_freelist,
		(qdf_list_node_t *)freedesc, &list_size);

	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
		((list_size >= REO_DESC_FREELIST_SIZE) ||
		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
		struct dp_rx_tid *rx_tid;

		qdf_list_remove_front(&soc->reo_desc_freelist,
			(qdf_list_node_t **)&desc);
		list_size--;
		rx_tid = &desc->rx_tid;

		/* Flush and invalidate REO descriptor from HW cache: Base and
		 * extension descriptors should be flushed separately */
		tot_desc_size = hal_get_reo_qdesc_size(soc->hal_soc,
			rx_tid->ba_win_size);
		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0);

		/* Flush reo extension descriptors, highest offset first */
		while ((tot_desc_size -= desc_size) > 0) {
			qdf_mem_zero(&params, sizeof(params));
			params.std.addr_lo =
				((uint64_t)(rx_tid->hw_qdesc_paddr) +
				tot_desc_size) & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			/* No callback: extension flushes are fire-and-forget */
			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
				CMD_FLUSH_CACHE,
				&params,
				NULL,
				NULL)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"%s: fail to send CMD_CACHE_FLUSH:"
					"tid %d desc %pK\n", __func__,
					rx_tid->tid,
					(void *)(rx_tid->hw_qdesc_paddr));
			}
		}

		/* Flush base descriptor; dp_reo_desc_free() runs on status
		 * and releases the memory.
		 */
		qdf_mem_zero(&params, sizeof(params));
		params.std.need_status = 1;
		params.std.addr_lo =
			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
			CMD_FLUSH_CACHE,
			&params,
			dp_reo_desc_free,
			(void *)desc)) {
			/* Local shadows the reo_status parameter on purpose:
			 * a synthetic status for the direct free below.
			 */
			union hal_reo_status reo_status;
			/*
			 * If dp_reo_send_cmd return failure, related TID queue desc
			 * should be unmapped. Also locally reo_desc, together with
			 * TID queue desc also need to be freed accordingly.
			 *
			 * Here invoke desc_free function directly to do clean up.
			 */
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: fail to send REO cmd to flush cache: tid %d\n",
				__func__, rx_tid->tid);
			qdf_mem_zero(&reo_status, sizeof(reo_status));
			reo_status.fl_cache_status.header.status = 0;
			dp_reo_desc_free(soc, (void *)desc, &reo_status);
		}
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
}
1443
/*
 * dp_rx_tid_delete_wifi3() – Delete receive TID queue
 * @peer: Datapath peer handle
 * @tid: TID
 *
 * Invalidates the TID's HW queue descriptor (vld=0); the actual cache
 * flush and memory release happen later in dp_rx_tid_delete_cb().
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
{
	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	/* Ownership of the queue descriptor memory moves to this freelist
	 * node; it is released from the REO callback path.
	 */
	struct reo_desc_list_node *freedesc =
		qdf_mem_malloc(sizeof(*freedesc));

	if (!freedesc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: malloc failed for freedesc: tid %d\n",
			__func__, tid);
		return -ENOMEM;
	}

	freedesc->rx_tid = *rx_tid;

	qdf_mem_zero(&params, sizeof(params));

	/* Mark the queue invalid so HW stops using it */
	params.std.need_status = 0;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_vld = 1;
	params.u.upd_queue_params.vld = 0;

	/* NOTE(review): the dp_reo_send_cmd() return value is ignored;
	 * if the command fails, freedesc (and the descriptor it now owns)
	 * leaks because the callback never runs - confirm whether this
	 * path can fail and needs handling.
	 */
	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
		dp_rx_tid_delete_cb, (void *)freedesc);

	/* Detach the descriptor from the TID; freedesc owns it now */
	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
	rx_tid->hw_qdesc_alloc_size = 0;
	rx_tid->hw_qdesc_paddr = 0;

	return 0;
}
1485
#ifdef DP_LFR
/*
 * dp_peer_setup_remaining_tids() - pre-allocate rx reorder queues for
 * TIDs 1..DP_MAX_TIDS-2 (LFR builds only).
 * @peer: Datapath peer handle
 *
 * NOTE(review): the last TID (DP_MAX_TIDS-1) is deliberately excluded -
 * presumably it is the non-QOS TID set up elsewhere; confirm.
 */
static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
{
	int tid;

	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"Setting up TID %d for peer %pK peer->local_id %d\n",
			tid, peer, peer->local_id);
	}
}
#else
/* No-op stub for non-LFR builds. (Stray ';' after the body removed: an
 * empty file-scope declaration is invalid ISO C and trips -Wpedantic.)
 */
static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
#endif
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001501/*
1502 * dp_peer_rx_init() – Initialize receive TID state
1503 * @pdev: Datapath pdev
1504 * @peer: Datapath peer
1505 *
1506 */
1507void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
1508{
1509 int tid;
1510 struct dp_rx_tid *rx_tid;
1511 for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1512 rx_tid = &peer->rx_tid[tid];
1513 rx_tid->array = &rx_tid->base;
1514 rx_tid->base.head = rx_tid->base.tail = NULL;
1515 rx_tid->tid = tid;
1516 rx_tid->defrag_timeout_ms = 0;
1517 rx_tid->ba_win_size = 0;
1518 rx_tid->ba_status = DP_RX_BA_INACTIVE;
1519
1520 rx_tid->defrag_waitlist_elem.tqe_next = NULL;
1521 rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
1522
1523#ifdef notyet /* TODO: See if this is required for exception handling */
1524 /* invalid sequence number */
1525 peer->tids_last_seq[tid] = 0xffff;
1526#endif
1527 }
1528
1529 /* Setup default (non-qos) rx tid queue */
1530 dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
Karunakar Dasinenied1de122016-08-02 11:57:59 -07001531
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -08001532 /* Setup rx tid queue for TID 0.
1533 * Other queues will be setup on receiving first packet, which will cause
1534 * NULL REO queue error
1535 */
1536 dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
1537
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001538 /*
Pramod Simhab17d0672017-03-06 17:20:13 -08001539 * Setup the rest of TID's to handle LFR
1540 */
1541 dp_peer_setup_remaining_tids(peer);
1542
1543 /*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001544 * Set security defaults: no PN check, no security. The target may
1545 * send a HTT SEC_IND message to overwrite these defaults.
1546 */
1547 peer->security[dp_sec_ucast].sec_type =
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05301548 peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001549}
1550
/*
 * dp_peer_rx_cleanup() – Cleanup receive TID state
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 * Deletes the HW reorder queue of every TID that has one allocated.
 */
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	int tid;
	uint32_t tid_delete_mask = 0;

	/* Tear down only TIDs whose HW queue descriptor was allocated */
	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
			dp_rx_tid_delete_wifi3(peer, tid);
			tid_delete_mask |= (1 << tid);
		}
	}
#ifdef notyet /* See if FW can remove queues as part of peer cleanup */
	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->osif_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			tid_delete_mask);
	}
#endif
}
1575
1576/*
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08001577 * dp_peer_cleanup() – Cleanup peer information
1578 * @vdev: Datapath vdev
1579 * @peer: Datapath peer
1580 *
1581 */
1582void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1583{
1584 peer->last_assoc_rcvd = 0;
1585 peer->last_disassoc_rcvd = 0;
1586 peer->last_deauth_rcvd = 0;
1587
1588 /* cleanup the Rx reorder queues for this peer */
1589 dp_peer_rx_cleanup(vdev, peer);
1590}
1591
/*
* dp_addba_requestprocess_wifi3() – Process ADDBA request from peer
*
* @peer: Datapath peer handle
* @dialogtoken: dialogtoken from ADDBA frame
* @tid: TID number
* @batimeout: BA timeout negotiated in ADDBA frame (currently unused here)
* @buffersize: BA window size negotiated in ADDBA frame
* @startseqnum: Start seq. number received in BA sequence control
* in ADDBA frame
*
* Return: 0 on success, error code on failure
*/
int dp_addba_requestprocess_wifi3(void *peer_handle,
	uint8_t dialogtoken, uint16_t tid, uint16_t batimeout,
	uint16_t buffersize, uint16_t startseqnum)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	/* A BA session is already active on this TID with a programmed REO
	 * queue: mark it inactive so it is re-negotiated below.
	 */
	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE) &&
			(rx_tid->hw_qdesc_vaddr_unaligned != NULL))
		rx_tid->ba_status = DP_RX_BA_INACTIVE;

	/* (Re)program the REO queue with the negotiated window size and the
	 * start sequence number from the ADDBA frame.
	 */
	if (dp_rx_tid_setup_wifi3(peer, tid, buffersize,
		startseqnum)) {
		/* TODO: Should we send addba reject in this case */
		return QDF_STATUS_E_FAILURE;
	}

	/* Honor a user-configured response status (see
	 * dp_set_addba_response()); otherwise report success in the
	 * ADDBA response frame.
	 */
	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
		rx_tid->statuscode = rx_tid->userstatuscode;
	else
		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;

	/* cache the dialog token for the matching ADDBA response */
	rx_tid->dialogtoken = dialogtoken;
	rx_tid->ba_status = DP_RX_BA_ACTIVE;
	rx_tid->num_of_addba_req++;

	return 0;
}
1631
1632/*
1633* dp_rx_addba_responsesetup_wifi3() – Process ADDBA request from peer
1634*
1635* @peer: Datapath peer handle
1636* @tid: TID number
1637* @dialogtoken: output dialogtoken
1638* @statuscode: output dialogtoken
Karunakar Dasinenied1de122016-08-02 11:57:59 -07001639* @buffersize: Ouput BA window sizze
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001640* @batimeout: Ouput BA timeout
1641*/
1642void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
1643 uint8_t *dialogtoken, uint16_t *statuscode,
Karunakar Dasinenied1de122016-08-02 11:57:59 -07001644 uint16_t *buffersize, uint16_t *batimeout)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001645{
1646 struct dp_peer *peer = (struct dp_peer *)peer_handle;
1647 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1648
sumedh baikadye3947bd2017-11-29 19:19:25 -08001649 rx_tid->num_of_addba_resp++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001650 /* setup ADDBA response paramters */
1651 *dialogtoken = rx_tid->dialogtoken;
1652 *statuscode = rx_tid->statuscode;
Karunakar Dasinenied1de122016-08-02 11:57:59 -07001653 *buffersize = rx_tid->ba_win_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001654 *batimeout = 0;
1655}
1656
1657/*
Gyanranjan Hazarika99a58d32017-12-22 21:56:17 -08001658* dp_set_addba_response() – Set a user defined ADDBA response status code
1659*
1660* @peer: Datapath peer handle
1661* @tid: TID number
1662* @statuscode: response status code to be set
1663*/
1664void dp_set_addba_response(void *peer_handle, uint8_t tid,
1665 uint16_t statuscode)
1666{
1667 struct dp_peer *peer = (struct dp_peer *)peer_handle;
1668 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1669
1670 rx_tid->userstatuscode = statuscode;
1671}
1672
1673/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001674* dp_rx_delba_process_wifi3() – Process DELBA from peer
1675* @peer: Datapath peer handle
Karunakar Dasinenied1de122016-08-02 11:57:59 -07001676* @tid: TID number
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001677* @reasoncode: Reason code received in DELBA frame
1678*
1679* Return: 0 on success, error code on failure
1680*/
1681int dp_delba_process_wifi3(void *peer_handle,
Karunakar Dasinenied1de122016-08-02 11:57:59 -07001682 int tid, uint16_t reasoncode)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001683{
1684 struct dp_peer *peer = (struct dp_peer *)peer_handle;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001685 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1686
1687 if (rx_tid->ba_status != DP_RX_BA_ACTIVE)
1688 return QDF_STATUS_E_FAILURE;
1689
1690 /* TODO: See if we can delete the existing REO queue descriptor and
1691 * replace with a new one without queue extenstion descript to save
1692 * memory
1693 */
sumedh baikadye3947bd2017-11-29 19:19:25 -08001694 rx_tid->num_of_delba_req++;
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08001695 dp_rx_tid_update_wifi3(peer, tid, 1, 0);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001696
1697 rx_tid->ba_status = DP_RX_BA_INACTIVE;
1698
1699 return 0;
1700}
1701
1702void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
1703 qdf_nbuf_t msdu_list)
1704{
1705 while (msdu_list) {
1706 qdf_nbuf_t msdu = msdu_list;
1707
1708 msdu_list = qdf_nbuf_next(msdu_list);
1709 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001710 "discard rx %pK from partly-deleted peer %pK "
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001711 "(%02x:%02x:%02x:%02x:%02x:%02x)\n",
1712 msdu, peer,
1713 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
1714 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
1715 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
1716 qdf_nbuf_free(msdu);
1717 }
1718}
1719
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05301720
/**
 * dp_set_pn_check_wifi3() - enable PN check in REO for security
 * @vdev_handle: Datapath vdev
 * @peer_handle: Datapath peer handle
 * @sec_type: security type (cipher) to program
 * @rx_pn: Receive pn starting number (4 x 32-bit words; only consumed for
 *         WAPI, where the PN must start from a predefined value)
 *
 * Issues a REO UPDATE_RX_REO_QUEUE command for every TID that has a
 * hardware queue descriptor, programming the PN size/check settings
 * appropriate for @sec_type.
 */

void
dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	int i;
	uint8_t pn_size;	/* PN width in bits, mirrored into rx_tid */
	struct hal_reo_cmd_params params;

	/* preconditions */
	qdf_assert(vdev);

	pdev = vdev->pdev;
	soc = pdev->soc;


	qdf_mem_zero(&params, sizeof(params));

	/* common REO update-queue flags; per-cipher fields filled below */
	params.std.need_status = 1;
	params.u.upd_queue_params.update_pn_valid = 1;
	params.u.upd_queue_params.update_pn_size = 1;
	params.u.upd_queue_params.update_pn = 1;
	params.u.upd_queue_params.update_pn_check_needed = 1;

	/* NOTE(review): only the unicast slot is updated here; mcast
	 * sec_type is left untouched — confirm that is intentional.
	 */
	peer->security[dp_sec_ucast].sec_type = sec_type;

	switch (sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		/* 48-bit PN for TKIP/CCMP/GCMP family */
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 48;
		pn_size = 48;
		break;
	case cdp_sec_type_wapi:
		/* WAPI uses a 128-bit PN; parity differs by role */
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 128;
		pn_size = 128;
		if (vdev->opmode == wlan_op_mode_ap) {
			params.u.upd_queue_params.pn_even = 1;
			params.u.upd_queue_params.update_pn_even = 1;
		} else {
			params.u.upd_queue_params.pn_uneven = 1;
			params.u.upd_queue_params.update_pn_uneven = 1;
		}
		break;
	default:
		/* open / unknown cipher: disable PN checking */
		params.u.upd_queue_params.pn_check_needed = 0;
		pn_size = 0;
		break;
	}


	/* push the PN configuration to every TID that has a REO queue */
	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (sec_type != cdp_sec_type_wapi) {
				params.u.upd_queue_params.update_pn_valid = 0;
			} else {
				/*
				 * Setting PN valid bit for WAPI sec_type,
				 * since WAPI PN has to be started with
				 * predefined value
				 */
				params.u.upd_queue_params.update_pn_valid = 1;
				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
			}
			rx_tid->pn_size = pn_size;
			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
				dp_rx_tid_update_cb, rx_tid);
		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
				"PN Check not setup for TID :%d \n", i);
		}
	}
}
1819
1820
/*
 * dp_rx_sec_ind_handler() - handle HTT SEC_IND: record the security (cipher)
 * type negotiated for a peer
 * @soc_handle: Datapath soc handle
 * @peer_id: target peer id the indication refers to
 * @sec_type: security type reported by the target
 * @is_unicast: non-zero for the unicast key, zero for the mcast key
 * @michael_key: TKIP michael key (currently unused — see notyet block)
 * @rx_pn: initial receive PN (currently unused — see notyet block)
 */
void
dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
	enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
	u_int32_t *rx_pn)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer;
	int sec_index;

	/* NOTE(review): unclear from here whether dp_peer_find_by_id()
	 * takes a reference on the peer — confirm no ref is leaked.
	 */
	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Couldn't find peer from ID %d - skipping security inits\n",
			peer_id);
		return;
	}
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
		"%s key of type %d\n",
		peer,
		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
		is_unicast ? "ucast" : "mcast",
		sec_type);
	/* only the cipher type is recorded today; key material handling
	 * below is compiled out (notyet)
	 */
	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;
#ifdef notyet /* TODO: See if this is required for defrag support */
	/* michael key only valid for TKIP, but for simplicity,
	 * copy it anyway
	 */
	qdf_mem_copy(
		&peer->security[sec_index].michael_key[0],
		michael_key,
		sizeof(peer->security[sec_index].michael_key));
#ifdef BIG_ENDIAN_HOST
	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
				 sizeof(peer->security[sec_index].michael_key));
#endif /* BIG_ENDIAN_HOST */
#endif

#ifdef notyet /* TODO: Check if this is required for wifi3.0 */
	if (sec_type != htt_sec_type_wapi) {
		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
	} else {
		for (i = 0; i < DP_MAX_TIDS; i++) {
			/*
			 * Setting PN valid bit for WAPI sec_type,
			 * since WAPI PN has to be started with predefined value
			 */
			peer->tids_last_pn_valid[i] = 1;
			qdf_mem_copy(
				(u_int8_t *) &peer->tids_last_pn[i],
				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
			peer->tids_last_pn[i].pn128[1] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
			peer->tids_last_pn[i].pn128[0] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
		}
	}
#endif
	/* TODO: Update HW TID queue with PN check parameters (pn type for
	 * all security types and last pn for WAPI) once REO command API
	 * is available
	 */
}
1887
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05301888#ifndef CONFIG_WIN
Leo Chang5ea93a42016-11-03 12:39:49 -07001889/**
1890 * dp_register_peer() - Register peer into physical device
1891 * @pdev - data path device instance
1892 * @sta_desc - peer description
1893 *
1894 * Register peer into physical device
1895 *
1896 * Return: QDF_STATUS_SUCCESS registration success
1897 * QDF_STATUS_E_FAULT peer not found
1898 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001899QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
Leo Chang5ea93a42016-11-03 12:39:49 -07001900 struct ol_txrx_desc_type *sta_desc)
1901{
1902 struct dp_peer *peer;
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001903 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07001904
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001905 peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
1906 sta_desc->sta_id);
Leo Chang5ea93a42016-11-03 12:39:49 -07001907 if (!peer)
1908 return QDF_STATUS_E_FAULT;
1909
1910 qdf_spin_lock_bh(&peer->peer_info_lock);
1911 peer->state = OL_TXRX_PEER_STATE_CONN;
1912 qdf_spin_unlock_bh(&peer->peer_info_lock);
1913
1914 return QDF_STATUS_SUCCESS;
1915}
1916
1917/**
1918 * dp_clear_peer() - remove peer from physical device
1919 * @pdev - data path device instance
1920 * @sta_id - local peer id
1921 *
1922 * remove peer from physical device
1923 *
1924 * Return: QDF_STATUS_SUCCESS registration success
1925 * QDF_STATUS_E_FAULT peer not found
1926 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001927QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
Leo Chang5ea93a42016-11-03 12:39:49 -07001928{
1929 struct dp_peer *peer;
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001930 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07001931
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001932 peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
Leo Chang5ea93a42016-11-03 12:39:49 -07001933 if (!peer)
1934 return QDF_STATUS_E_FAULT;
1935
1936 qdf_spin_lock_bh(&peer->peer_info_lock);
1937 peer->state = OL_TXRX_PEER_STATE_DISC;
1938 qdf_spin_unlock_bh(&peer->peer_info_lock);
1939
1940 return QDF_STATUS_SUCCESS;
1941}
1942
1943/**
1944 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
1945 * @pdev - data path device instance
1946 * @vdev - virtual interface instance
1947 * @peer_addr - peer mac address
1948 * @peer_id - local peer id with target mac address
1949 *
1950 * Find peer by peer mac address within vdev
1951 *
1952 * Return: peer instance void pointer
1953 * NULL cannot find target peer
1954 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001955void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
1956 struct cdp_vdev *vdev_handle,
Leo Chang5ea93a42016-11-03 12:39:49 -07001957 uint8_t *peer_addr, uint8_t *local_id)
1958{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001959 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
1960 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07001961 struct dp_peer *peer;
1962
Jeff Johnson3f217e22017-09-18 10:13:35 -07001963 DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05301964 peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
Jeff Johnson3f217e22017-09-18 10:13:35 -07001965 DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);
Leo Chang5ea93a42016-11-03 12:39:49 -07001966
1967 if (!peer)
1968 return NULL;
1969
1970 if (peer->vdev != vdev)
1971 return NULL;
1972
1973 *local_id = peer->local_id;
Yun Park11d46e02017-11-27 10:51:53 -08001974 DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);
Leo Chang5ea93a42016-11-03 12:39:49 -07001975
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08001976 /* ref_cnt is incremented inside dp_peer_find_hash_find().
1977 * Decrement it here.
1978 */
1979 qdf_atomic_dec(&peer->ref_cnt);
Leo Chang5ea93a42016-11-03 12:39:49 -07001980
1981 return peer;
1982}
1983
1984/**
1985 * dp_local_peer_id() - Find local peer id within peer instance
1986 * @peer - peer instance
1987 *
1988 * Find local peer id within peer instance
1989 *
1990 * Return: local peer id
1991 */
1992uint16_t dp_local_peer_id(void *peer)
1993{
1994 return ((struct dp_peer *)peer)->local_id;
1995}
1996
1997/**
1998 * dp_peer_find_by_local_id() - Find peer by local peer id
1999 * @pdev - data path device instance
2000 * @local_peer_id - local peer id want to find
2001 *
2002 * Find peer by local peer id within physical device
2003 *
2004 * Return: peer instance void pointer
2005 * NULL cannot find target peer
2006 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002007void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
Leo Chang5ea93a42016-11-03 12:39:49 -07002008{
2009 struct dp_peer *peer;
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002010 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07002011
2012 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2013 peer = pdev->local_peer_ids.map[local_id];
2014 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Yun Park11d46e02017-11-27 10:51:53 -08002015 DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
Leo Chang5ea93a42016-11-03 12:39:49 -07002016 return peer;
2017}
2018
2019/**
2020 * dp_peer_state_update() - update peer local state
2021 * @pdev - data path device instance
2022 * @peer_addr - peer mac address
2023 * @state - new peer local state
2024 *
2025 * update peer local state
2026 *
2027 * Return: QDF_STATUS_SUCCESS registration success
2028 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002029QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
Leo Chang5ea93a42016-11-03 12:39:49 -07002030 enum ol_txrx_peer_state state)
2031{
2032 struct dp_peer *peer;
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002033 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07002034
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05302035 peer = dp_peer_find_hash_find(pdev->soc, peer_mac, 0, 0);
Ankit Gupta6fb389b2017-01-03 12:23:45 -08002036 if (NULL == peer) {
2037 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2038 "Failed to find peer for: [%pM]", peer_mac);
2039 return QDF_STATUS_E_FAILURE;
2040 }
Leo Chang5ea93a42016-11-03 12:39:49 -07002041 peer->state = state;
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08002042
Jeff Johnson3f217e22017-09-18 10:13:35 -07002043 DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08002044 /* ref_cnt is incremented inside dp_peer_find_hash_find().
2045 * Decrement it here.
2046 */
2047 qdf_atomic_dec(&peer->ref_cnt);
2048
Leo Chang5ea93a42016-11-03 12:39:49 -07002049 return QDF_STATUS_SUCCESS;
2050}
2051
2052/**
2053 * dp_get_vdevid() - Get virtaul interface id which peer registered
2054 * @peer - peer instance
2055 * @vdev_id - virtaul interface id which peer registered
2056 *
2057 * Get virtaul interface id which peer registered
2058 *
2059 * Return: QDF_STATUS_SUCCESS registration success
2060 */
2061QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
2062{
2063 struct dp_peer *peer = peer_handle;
2064
Jeff Johnson3f217e22017-09-18 10:13:35 -07002065 DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
Leo Chang5ea93a42016-11-03 12:39:49 -07002066 peer, peer->vdev, peer->vdev->vdev_id);
2067 *vdev_id = peer->vdev->vdev_id;
2068 return QDF_STATUS_SUCCESS;
2069}
2070
Yun Park601d0d82017-08-28 21:49:31 -07002071struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
2072 uint8_t sta_id)
Yun Parkfde6b9e2017-06-26 17:13:11 -07002073{
Yun Park601d0d82017-08-28 21:49:31 -07002074 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Yun Parkfde6b9e2017-06-26 17:13:11 -07002075 struct dp_peer *peer = NULL;
Yun Parkfde6b9e2017-06-26 17:13:11 -07002076
2077 if (sta_id >= WLAN_MAX_STA_COUNT) {
2078 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2079 "Invalid sta id passed");
2080 return NULL;
2081 }
2082
Yun Parkfde6b9e2017-06-26 17:13:11 -07002083 if (!pdev) {
2084 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2085 "PDEV not found for sta_id [%d]", sta_id);
2086 return NULL;
2087 }
2088
2089 peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
2090 if (!peer) {
2091 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2092 "PEER [%d] not found", sta_id);
2093 return NULL;
2094 }
2095
2096 return (struct cdp_vdev *)peer->vdev;
2097}
2098
Leo Chang5ea93a42016-11-03 12:39:49 -07002099/**
2100 * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
2101 * @peer - peer instance
2102 *
2103 * Get virtual interface instance which peer belongs
2104 *
2105 * Return: virtual interface instance pointer
2106 * NULL in case cannot find
2107 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002108struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
Leo Chang5ea93a42016-11-03 12:39:49 -07002109{
2110 struct dp_peer *peer = peer_handle;
2111
Jeff Johnson3f217e22017-09-18 10:13:35 -07002112 DP_TRACE(INFO, "peer %pK vdev %pK", peer, peer->vdev);
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002113 return (struct cdp_vdev *)peer->vdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07002114}
2115
2116/**
2117 * dp_peer_get_peer_mac_addr() - Get peer mac address
2118 * @peer - peer instance
2119 *
2120 * Get peer mac address
2121 *
2122 * Return: peer mac address pointer
2123 * NULL in case cannot find
2124 */
2125uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
2126{
2127 struct dp_peer *peer = peer_handle;
2128 uint8_t *mac;
2129
2130 mac = peer->mac_addr.raw;
Jeff Johnson3f217e22017-09-18 10:13:35 -07002131 DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
Leo Chang5ea93a42016-11-03 12:39:49 -07002132 peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2133 return peer->mac_addr.raw;
2134}
2135
2136/**
2137 * dp_get_peer_state() - Get local peer state
2138 * @peer - peer instance
2139 *
2140 * Get local peer state
2141 *
2142 * Return: peer status
2143 */
2144int dp_get_peer_state(void *peer_handle)
2145{
2146 struct dp_peer *peer = peer_handle;
2147
Yun Park11d46e02017-11-27 10:51:53 -08002148 DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state);
Leo Chang5ea93a42016-11-03 12:39:49 -07002149 return peer->state;
2150}
2151
2152/**
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08002153 * dp_get_last_assoc_received() - get time of last assoc received
2154 * @peer_handle: peer handle
2155 *
2156 * Return: pointer for the time of last assoc received
2157 */
2158qdf_time_t *dp_get_last_assoc_received(void *peer_handle)
2159{
2160 struct dp_peer *peer = peer_handle;
2161
Jeff Johnson3f217e22017-09-18 10:13:35 -07002162 DP_TRACE(INFO, "peer %pK last_assoc_rcvd: %lu", peer,
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08002163 peer->last_assoc_rcvd);
2164 return &peer->last_assoc_rcvd;
2165}
2166
2167/**
2168 * dp_get_last_disassoc_received() - get time of last disassoc received
2169 * @peer_handle: peer handle
2170 *
2171 * Return: pointer for the time of last disassoc received
2172 */
2173qdf_time_t *dp_get_last_disassoc_received(void *peer_handle)
2174{
2175 struct dp_peer *peer = peer_handle;
2176
Jeff Johnson3f217e22017-09-18 10:13:35 -07002177 DP_TRACE(INFO, "peer %pK last_disassoc_rcvd: %lu", peer,
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08002178 peer->last_disassoc_rcvd);
2179 return &peer->last_disassoc_rcvd;
2180}
2181
2182/**
2183 * dp_get_last_deauth_received() - get time of last deauth received
2184 * @peer_handle: peer handle
2185 *
2186 * Return: pointer for the time of last deauth received
2187 */
2188qdf_time_t *dp_get_last_deauth_received(void *peer_handle)
2189{
2190 struct dp_peer *peer = peer_handle;
2191
Jeff Johnson3f217e22017-09-18 10:13:35 -07002192 DP_TRACE(INFO, "peer %pK last_deauth_rcvd: %lu", peer,
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08002193 peer->last_deauth_rcvd);
2194 return &peer->last_deauth_rcvd;
2195}
2196
/**
 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
 * @pdev - data path device instance
 *
 * Builds a singly-linked freelist of local peer ids inside the pool array:
 * pool[i] holds the index of the next free id. The slot at index
 * OL_TXRX_NUM_LOCAL_PEER_IDS is a self-linked end-of-list marker, so the
 * pool array must have OL_TXRX_NUM_LOCAL_PEER_IDS + 1 entries
 * (NOTE(review): array declaration not visible here — confirm sizing).
 *
 * Return: none
 */
void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "Peer pool init");
}
2225
/**
 * dp_local_peer_id_alloc() - allocate local peer id
 * @pdev - data path device instance
 * @peer - new peer instance
 *
 * Pops the head of the freelist built by dp_local_peer_id_pool_init() and
 * records the peer in the id->peer map. On exhaustion the peer gets
 * OL_TXRX_INVALID_LOCAL_PEER_ID.
 *
 * Return: none
 */
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	/* a self-linked head is the end-of-list marker: the pool is empty */
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
}
2253
2254/**
2255 * dp_local_peer_id_free() - remove local peer id
2256 * @pdev - data path device instance
2257 * @peer - peer instance should be removed
2258 *
2259 * remove local peer id
2260 *
2261 * Return: none
2262 */
2263void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
2264{
2265 int i = peer->local_id;
2266 if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
2267 (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
2268 return;
2269 }
2270
2271 /* put this ID on the head of the freelist */
2272 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
2273 pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
2274 pdev->local_peer_ids.freelist = i;
2275 pdev->local_peer_ids.map[i] = NULL;
2276 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
2277}
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05302278#endif
Ishank Jain1e7401c2017-02-17 15:38:39 +05302279
2280/**
2281 * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
2282 * @soc_handle: DP SOC handle
2283 * @peer_id:peer_id of the peer
2284 *
2285 * return: vdev_id of the vap
2286 */
2287uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
2288 uint16_t peer_id, uint8_t *peer_mac)
2289{
2290 struct dp_soc *soc = (struct dp_soc *)soc_handle;
2291 struct dp_peer *peer;
2292
2293 peer = dp_peer_find_by_id(soc, peer_id);
2294
2295 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Jeff Johnson3f217e22017-09-18 10:13:35 -07002296 "soc %pK peer_id %d", soc, peer_id);
Ishank Jain1e7401c2017-02-17 15:38:39 +05302297
2298 if (!peer) {
2299 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2300 "peer not found ");
2301 return CDP_INVALID_VDEV_ID;
2302 }
2303
2304 qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
2305 return peer->vdev->vdev_id;
2306}
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07002307
/**
 * dp_peer_rxtid_stats: Retrieve Rx TID (REO queue) stats from HW
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: REO command callback function
 * @cb_ctxt: Callback context
 *
 * For each TID that has a REO queue descriptor, sends a GET_QUEUE_STATS
 * command (results delivered via @dp_stats_cmd_cb) followed by a
 * FLUSH_CACHE command so the descriptor memory reflects current HW state.
 *
 * Return: none
 */
void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
	void *cb_ctxt)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	int i;

	/* nothing to do without a callback to receive the stats */
	if (!dp_stats_cmd_cb)
		return;

	qdf_mem_zero(&params, sizeof(params));
	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
			params.std.need_status = 1;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			/* caller-supplied context wins; otherwise hand the
			 * rx_tid itself to the callback
			 */
			if (cb_ctxt) {
				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
					&params, dp_stats_cmd_cb, cb_ctxt);
			} else {
				dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
					&params, dp_stats_cmd_cb, rx_tid);
			}

			/* Flush REO descriptor from HW cache to update stats
			 * in descriptor memory. This is to help debugging */
			qdf_mem_zero(&params, sizeof(params));
			params.std.need_status = 0;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			params.u.fl_cache_params.flush_no_inval = 1;
			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
				NULL);
		}
	}
}
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05302358