/*
 * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>              /* uint32_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc,free */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <qdf_debugfs.h>

/* header files for utilities */
#include <cds_queue.h>          /* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_if_athvar.h>

/* header files for HTT API */
#include <ol_htt_api.h>
#include <ol_htt_tx_api.h>

/* header files for our own APIs */
#include <ol_txrx_api.h>
#include <ol_txrx_dbg.h>
#include <cdp_txrx_ocb.h>
#include <ol_txrx_ctrl_api.h>
#include <cdp_txrx_stats.h>
#include <ol_txrx_osif_api.h>
/* header files for our internal definitions */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT, etc. */
#include <wdi_event.h>          /* WDI events */
#include <ol_tx.h>              /* ol_tx_ll */
#include <ol_rx.h>              /* ol_rx_deliver */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
#include <ol_rx_pn.h>           /* ol_rx_pn_check, etc. */
#include <ol_rx_fwd.h>          /* ol_rx_fwd_check, etc. */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
#include <ol_rx_reorder.h>
#include <ol_tx_send.h>         /* ol_tx_discard_target_frms */
#include <ol_tx_desc.h>         /* ol_tx_desc_frame_free */
#include <ol_tx_queue.h>
#include <ol_tx_sched.h>        /* ol_tx_sched_attach, etc. */
#include <ol_txrx.h>
#include <ol_txrx_types.h>
#include <ol_cfg.h>
#include <cdp_txrx_flow_ctrl_legacy.h>
#include <cdp_txrx_bus.h>
#include <cdp_txrx_ipa.h>
#include <cdp_txrx_pmf.h>
#include "wma.h"
#include "hif.h"
#include "hif_main.h"
#include <cdp_txrx_peer_ops.h>
#ifndef REMOVE_PKT_LOG
#include "pktlog_ac.h"
#endif
#include <wlan_policy_mgr_api.h>
#include "epping_main.h"
#include <a_types.h>
#include <cdp_txrx_handle.h>
#include "wlan_qct_sys.h"

#include <htt_internal.h>
#include <ol_txrx_ipa.h>
#include "wlan_roam_debug.h"
#include "cfg_ucfg_api.h"


#define DPT_DEBUGFS_PERMS (QDF_FILE_USR_READ | \
                        QDF_FILE_USR_WRITE | \
                        QDF_FILE_GRP_READ | \
                        QDF_FILE_OTH_READ)

#define DPT_DEBUGFS_NUMBER_BASE 10
/**
 * enum dpt_set_param_debugfs - dpt set params
 * @DPT_SET_PARAM_PROTO_BITMAP: set proto bitmap
 * @DPT_SET_PARAM_NR_RECORDS: set num of records
 * @DPT_SET_PARAM_VERBOSITY: set verbosity
 * @DPT_SET_PARAM_NUM_RECORDS_TO_DUMP: set num of records to dump
 */
enum dpt_set_param_debugfs {
        DPT_SET_PARAM_PROTO_BITMAP = 1,
        DPT_SET_PARAM_NR_RECORDS = 2,
        DPT_SET_PARAM_VERBOSITY = 3,
        DPT_SET_PARAM_NUM_RECORDS_TO_DUMP = 4,
        DPT_SET_PARAM_MAX,
};

QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
                                     uint8_t *peer_mac,
                                     enum ol_txrx_peer_state state);
static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *vdev,
                                        bool val);
int ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
extern void
ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
                      struct ol_tx_wmm_param_t wmm_param);

extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
                                uint64_t **last_pn, uint32_t **rmf_pn_replays);

/* thresh for peer's cached buf queue beyond which the elements are dropped */
#define OL_TXRX_CACHED_BUFQ_THRESH 128

/**
 * ol_tx_mark_first_wakeup_packet() - set a flag indicating whether the FW
 * supports marking the first packet after WoW wakeup
 * @value: 1 for enabled, 0 for disabled
 *
 * Return: None
 */
static void ol_tx_mark_first_wakeup_packet(uint8_t value)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pdev) {
                ol_txrx_err("pdev is NULL");
                return;
        }

        htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
}

/**
 * ol_tx_set_is_mgmt_over_wmi_enabled() - set a flag indicating whether
 * management frames over WMI are enabled
 * @value: 1 to enable, 0 to disable
 *
 * Return: None
 */
void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pdev) {
                qdf_print("pdev is NULL");
                return;
        }
        pdev->is_mgmt_over_wmi_enabled = value;
}

/**
 * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
 *
 * Return: is_mgmt_over_wmi_enabled
 */
uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pdev) {
                qdf_print("pdev is NULL");
                return 0;
        }
        return pdev->is_mgmt_over_wmi_enabled;
}


#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
static void *
ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
                                   struct cdp_vdev *pvdev,
                                   uint8_t *peer_addr, uint8_t *peer_id)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
        struct ol_txrx_peer_t *peer;

        peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
        if (!peer)
                return NULL;
        *peer_id = peer->local_id;
        ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
        return peer;
}

static QDF_STATUS ol_txrx_get_vdevid(void *ppeer, uint8_t *vdev_id)
{
        struct ol_txrx_peer_t *peer = ppeer;

        if (!peer) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "peer argument is null!!");
                return QDF_STATUS_E_FAILURE;
        }

        *vdev_id = peer->vdev->vdev_id;
        return QDF_STATUS_SUCCESS;
}

static struct cdp_vdev *ol_txrx_get_vdev_by_sta_id(struct cdp_pdev *ppdev,
                                                   uint8_t sta_id)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
        struct ol_txrx_peer_t *peer = NULL;
        ol_txrx_vdev_handle vdev;

        if (!pdev) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
                          "PDEV not found for sta_id [%d]", sta_id);
                return NULL;
        }

        peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev,
                                                sta_id,
                                                PEER_DEBUG_ID_OL_INTERNAL);
        if (!peer) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
                          "PEER [%d] not found", sta_id);
                return NULL;
        }

        vdev = peer->vdev;
        ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);

        return (struct cdp_vdev *)vdev;
}

/**
 * ol_txrx_find_peer_by_addr() - find peer via peer mac addr and peer_id
 * @ppdev: pointer of type cdp_pdev
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds a peer with the given mac address and returns its
 * peer_id. Note that this function does not increment the peer->ref_cnt.
 * This means that the peer may be deleted in some other parallel context
 * after it has been found.
 *
 * Return: peer handle if peer is found, NULL if peer is not found.
 */
void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
                                uint8_t *peer_addr,
                                uint8_t *peer_id)
{
        struct ol_txrx_peer_t *peer;
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

        peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
                                                   PEER_DEBUG_ID_OL_INTERNAL);
        if (!peer)
                return NULL;
        *peer_id = peer->local_id;
        ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
        return peer;
}

/**
 * ol_txrx_peer_get_ref_by_addr() - get peer ref via peer mac addr and peer_id
 * @pdev: pointer of type ol_txrx_pdev_handle
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 * @dbg_id: debug id to track the owner of the reference
 *
 * This function finds the peer with the given mac address and returns its
 * peer_id. Note that this function increments the peer->ref_cnt.
 * This makes sure that the peer stays valid. It also means the caller needs
 * to call the corresponding API - ol_txrx_peer_release_ref - to drop the
 * peer reference.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr, peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
                                                 u8 *peer_addr,
                                                 u8 *peer_id,
                                                 enum peer_debug_id_type dbg_id)
{
        struct ol_txrx_peer_t *peer;

        peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
                                                   dbg_id);
        if (!peer)
                return NULL;
        *peer_id = peer->local_id;
        return peer;
}

static uint16_t ol_txrx_local_peer_id(void *ppeer)
{
        ol_txrx_peer_handle peer = ppeer;

        return peer->local_id;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @return handle to the txrx peer object
 */
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *ppdev,
                              uint8_t local_peer_id)
{
        struct ol_txrx_peer_t *peer;
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

        if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
            (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
                return NULL;
        }

        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        peer = pdev->local_peer_ids.map[local_peer_id];
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
        return peer;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @dbg_id - debug_id to track caller
 * @return handle to the txrx peer object
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 *  Note that this function increments the peer->ref_cnt.
 *  This makes sure that the peer stays valid. It also means the caller
 *  needs to call the corresponding API - ol_txrx_peer_release_ref - to
 *  drop the peer reference.
 *  Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_local_id(pdev, local_peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
                                 uint8_t local_peer_id,
                                 enum peer_debug_id_type dbg_id)
{
        struct ol_txrx_peer_t *peer = NULL;
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

        if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
            (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
                return NULL;
        }

        qdf_spin_lock_bh(&pdev->peer_ref_mutex);
        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        peer = pdev->local_peer_ids.map[local_peer_id];
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
        if (peer && peer->valid)
                ol_txrx_peer_get_ref(peer, dbg_id);
        else
                peer = NULL;
        qdf_spin_unlock_bh(&pdev->peer_ref_mutex);

        return peer;
}

static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
{
        int i;

        /* point the freelist to the first ID */
        pdev->local_peer_ids.freelist = 0;

        /* link each ID to the next one */
        for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
                pdev->local_peer_ids.pool[i] = i + 1;
                pdev->local_peer_ids.map[i] = NULL;
        }

        /* link the last ID to itself, to mark the end of the list */
        i = OL_TXRX_NUM_LOCAL_PEER_IDS;
        pdev->local_peer_ids.pool[i] = i;

        qdf_spinlock_create(&pdev->local_peer_ids.lock);
}
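
/*
 * Illustrative sketch, not part of the driver logic: with a hypothetical
 * OL_TXRX_NUM_LOCAL_PEER_IDS of 4, the pool init above leaves
 *   freelist = 0, pool = {1, 2, 3, 4, 4}
 * i.e. each pool entry holds the index of the next free ID, and the extra
 * last entry links to itself as the list-end marker. An alloc then hands
 * out ID 0 and advances freelist to pool[0] == 1; freeing ID 0 pushes it
 * back by setting pool[0] = freelist and freelist = 0. The "list empty"
 * test in the alloc below (pool[i] == i) is exactly "freelist points at
 * the self-linked end marker".
 */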

static void
ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
                            struct ol_txrx_peer_t *peer)
{
        int i;

        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        i = pdev->local_peer_ids.freelist;
        if (pdev->local_peer_ids.pool[i] == i) {
                /* the list is empty, except for the list-end marker */
                peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
        } else {
                /* take the head ID and advance the freelist */
                peer->local_id = i;
                pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
                pdev->local_peer_ids.map[i] = peer;
        }
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
                           struct ol_txrx_peer_t *peer)
{
        int i = peer->local_id;

        if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
            (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
                return;
        }
        /* put this ID on the head of the freelist */
        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
        pdev->local_peer_ids.freelist = i;
        pdev->local_peer_ids.map[i] = NULL;
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
{
        qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
}

#else
#define ol_txrx_local_peer_id_pool_init(pdev)   /* no-op */
#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
#define ol_txrx_local_peer_id_free(pdev, peer)  /* no-op */
#define ol_txrx_local_peer_id_cleanup(pdev)     /* no-op */
#endif

#if defined(CONFIG_DP_TRACE) && defined(WLAN_DEBUGFS)
/**
 * ol_txrx_read_dpt_buff_debugfs() - read dp trace buffer
 * @file: file to read
 * @arg: pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_read_dpt_buff_debugfs(qdf_debugfs_file_t file,
                                                void *arg)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)arg;
        uint32_t i = 0;
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID)
                return QDF_STATUS_E_INVAL;
        else if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE) {
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
                return QDF_STATUS_SUCCESS;
        }

        i = qdf_dpt_get_curr_pos_debugfs(file, pdev->state);
        status = qdf_dpt_dump_stats_debugfs(file, i);
        if (status == QDF_STATUS_E_FAILURE)
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS;
        else if (status == QDF_STATUS_SUCCESS)
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE;

        return status;
}

/**
 * ol_txrx_conv_str_to_int_debugfs() - convert string to int
 * @buf: buffer containing string
 * @len: buffer len
 * @proto_bitmap: defines the protocol to be tracked
 * @nr_records: defines the nth packet which is traced
 * @verbosity: defines the verbosity level
 * @num_records_to_dump: defines the number of records to be dumped
 *
 * This function expects the char buffer to be null terminated.
 * Otherwise the results may be unexpected.
 *
 * Return: 0 on success
 */
static int ol_txrx_conv_str_to_int_debugfs(char *buf, qdf_size_t len,
                                           int *proto_bitmap,
                                           int *nr_records,
                                           int *verbosity,
                                           int *num_records_to_dump)
{
        int num_value = DPT_SET_PARAM_PROTO_BITMAP;
        int ret, param_value = 0;
        char *buf_param = buf;
        int i;

        for (i = 1; i < DPT_SET_PARAM_MAX; i++) {
                /* Loop till you reach a space, as kstrtoint operates till
                 * the null character. Replace the space with a null
                 * character to read each value.
                 * Terminate the loop either at the null terminator or
                 * when len is 0.
                 */
                while (*buf && len) {
                        if (*buf == ' ') {
                                *buf = '\0';
                                buf++;
                                len--;
                                break;
                        }
                        buf++;
                        len--;
                }
                /* get the parameter */
                ret = qdf_kstrtoint(buf_param,
                                    DPT_DEBUGFS_NUMBER_BASE,
                                    &param_value);
                if (ret) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX,
                                  QDF_TRACE_LEVEL_ERROR,
                                  "%s: Error while parsing buffer. ret %d",
                                  __func__, ret);
                        return ret;
                }
                switch (num_value) {
                case DPT_SET_PARAM_PROTO_BITMAP:
                        *proto_bitmap = param_value;
                        break;
                case DPT_SET_PARAM_NR_RECORDS:
                        *nr_records = param_value;
                        break;
                case DPT_SET_PARAM_VERBOSITY:
                        *verbosity = param_value;
                        break;
                case DPT_SET_PARAM_NUM_RECORDS_TO_DUMP:
                        if (param_value > MAX_QDF_DP_TRACE_RECORDS)
                                param_value = MAX_QDF_DP_TRACE_RECORDS;
                        *num_records_to_dump = param_value;
                        break;
                default:
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                                  "%s %d: Set command needs exactly 4 arguments in format <proto_bitmap> <number of records> <verbosity> <number of records to dump>.",
                                  __func__, __LINE__);
                        break;
                }
                num_value++;
                /* buf_param should now point to the next param value. */
                buf_param = buf;
        }

        /* buf not yet at NULL implies more than 4 params were passed. */
        if (*buf) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s %d: Set command needs exactly 4 arguments in format <proto_bitmap> <number of records> <verbosity> <number of records to dump>.",
                          __func__, __LINE__);
                return -EINVAL;
        }
        return 0;
}

/**
 * ol_txrx_write_dpt_buff_debugfs() - set dp trace parameters
 * @priv: pdev object
 * @buf: buffer holding the values for the dpt parameters
 * @len: buf length
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_write_dpt_buff_debugfs(void *priv,
                                                 const char *buf,
                                                 qdf_size_t len)
{
        int ret;
        int proto_bitmap = 0;
        int nr_records = 0;
        int verbosity = 0;
        int num_records_to_dump = 0;
        char *buf1 = NULL;

        if (!buf || !len) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: null buffer or len. len %u",
                          __func__, (uint8_t)len);
                return QDF_STATUS_E_FAULT;
        }

        buf1 = (char *)qdf_mem_malloc(len);
        if (!buf1)
                return QDF_STATUS_E_FAULT;

        qdf_mem_copy(buf1, buf, len);
        ret = ol_txrx_conv_str_to_int_debugfs(buf1, len, &proto_bitmap,
                                              &nr_records, &verbosity,
                                              &num_records_to_dump);
        if (ret) {
                qdf_mem_free(buf1);
                return QDF_STATUS_E_INVAL;
        }

        qdf_dpt_set_value_debugfs(proto_bitmap, nr_records, verbosity,
                                  num_records_to_dump);
        qdf_mem_free(buf1);
        return QDF_STATUS_SUCCESS;
}
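
/*
 * Usage sketch (illustrative, not a statement of the product interface):
 * with debugfs mounted at the conventional /sys/kernel/debug, the four
 * parameters can be set and the trace then dumped from a shell, e.g.
 *
 *   # format: <proto_bitmap> <nr_records> <verbosity> <num_records_to_dump>
 *   echo "4369 1 2 100" > /sys/kernel/debug/dpt_stats/dump_set_dpt_logs
 *   cat /sys/kernel/debug/dpt_stats/dump_set_dpt_logs
 *
 * The mount point and the numeric values here are assumptions for the
 * example only; repeated reads step pdev->state through
 * QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS until the dump completes.
 */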

static int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
        pdev->dpt_debugfs_fops.show = ol_txrx_read_dpt_buff_debugfs;
        pdev->dpt_debugfs_fops.write = ol_txrx_write_dpt_buff_debugfs;
        pdev->dpt_debugfs_fops.priv = pdev;

        pdev->dpt_stats_log_dir = qdf_debugfs_create_dir("dpt_stats", NULL);

        if (!pdev->dpt_stats_log_dir) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: error while creating debugfs dir for %s",
                          __func__, "dpt_stats");
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
                return -EBUSY;
        }

        if (!qdf_debugfs_create_file("dump_set_dpt_logs", DPT_DEBUGFS_PERMS,
                                     pdev->dpt_stats_log_dir,
                                     &pdev->dpt_debugfs_fops)) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: debug Entry creation failed!",
                          __func__);
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
                return -EBUSY;
        }

        pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
        return 0;
}

static void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
        qdf_debugfs_remove_dir_recursive(pdev->dpt_stats_log_dir);
}
#else
static inline int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
        return 0;
}

static inline void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
}
#endif

/**
 * ol_txrx_pdev_attach() - allocate txrx pdev
 * @soc: datapath soc handle
 * @ctrl_pdev: cfg pdev
 * @htc_pdev: HTC pdev
 * @osdev: os dev
 * @pdev_id: pdev identifier
 *
 * Return: txrx pdev handle
 *         NULL for failure
 */
static struct cdp_pdev *
ol_txrx_pdev_attach(ol_txrx_soc_handle soc,
                    struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
                    HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
{
        struct ol_txrx_pdev_t *pdev;
        struct cdp_cfg *cfg_pdev = (struct cdp_cfg *)ctrl_pdev;
        int i, tid;

        pdev = qdf_mem_malloc(sizeof(*pdev));
        if (!pdev)
                goto fail0;

        /* init LL/HL cfg here */
        pdev->cfg.is_high_latency = ol_cfg_is_high_latency(cfg_pdev);
        /*
         * Credit reporting through HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND
         * enabled or not.
         */
        pdev->cfg.credit_update_enabled =
                ol_cfg_is_credit_update_enabled(cfg_pdev);

        /* Explicitly request TX Completions from FW */
        pdev->cfg.request_tx_comp = cds_is_ptp_rx_opt_enabled() ||
                cds_is_packet_log_enabled();

        pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(cfg_pdev);

        /* store provided params */
        pdev->ctrl_pdev = cfg_pdev;
        pdev->osdev = osdev;

        for (i = 0; i < htt_num_sec_types; i++)
                pdev->sec_types[i] = (enum ol_sec_type)i;

        TXRX_STATS_INIT(pdev);
        ol_txrx_tso_stats_init(pdev);
        ol_txrx_fw_stats_desc_pool_init(pdev, FW_STATS_DESC_POOL_SIZE);

        TAILQ_INIT(&pdev->vdev_list);

        TAILQ_INIT(&pdev->req_list);
        pdev->req_list_depth = 0;
        qdf_spinlock_create(&pdev->req_list_spinlock);
        qdf_spinlock_create(&pdev->tx_mutex);

        /* do initial set up of the peer ID -> peer object lookup map */
        if (ol_txrx_peer_find_attach(pdev))
                goto fail1;

        /* initialize the counter of the target's tx buffer availability */
        qdf_atomic_init(&pdev->target_tx_credit);
        qdf_atomic_init(&pdev->orig_target_tx_credit);

        if (ol_cfg_is_high_latency(cfg_pdev)) {
                qdf_spinlock_create(&pdev->tx_queue_spinlock);
                pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
                if (!pdev->tx_sched.scheduler)
                        goto fail2;
        }
        ol_txrx_pdev_txq_log_init(pdev);
        ol_txrx_pdev_grp_stats_init(pdev);

        pdev->htt_pdev =
                htt_pdev_alloc(pdev, cfg_pdev, htc_pdev, osdev);
        if (!pdev->htt_pdev)
                goto fail3;

        htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
                                          ol_rx_pkt_dump_call);

        /*
         * Init the tid --> category table.
         * Regular tids (0-15) map to their AC.
         * Extension tids get their own categories.
         */
        for (tid = 0; tid < OL_TX_NUM_QOS_TIDS; tid++) {
                int ac = TXRX_TID_TO_WMM_AC(tid);

                pdev->tid_to_ac[tid] = ac;
        }
        pdev->tid_to_ac[OL_TX_NON_QOS_TID] =
                OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;
        pdev->tid_to_ac[OL_TX_MGMT_TID] =
                OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
        pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST] =
                OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
        pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
                OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;

        if (ol_cfg_is_flow_steering_enabled(pdev->ctrl_pdev))
                pdev->peer_id_unmap_ref_cnt =
                        TXRX_RFS_ENABLE_PEER_ID_UNMAP_COUNT;
        else
                pdev->peer_id_unmap_ref_cnt =
                        TXRX_RFS_DISABLE_PEER_ID_UNMAP_COUNT;

        ol_txrx_debugfs_init(pdev);

        return (struct cdp_pdev *)pdev;

fail3:
        ol_txrx_peer_find_detach(pdev);

fail2:
        if (ol_cfg_is_high_latency(cfg_pdev))
                qdf_spinlock_destroy(&pdev->tx_queue_spinlock);

fail1:
        qdf_spinlock_destroy(&pdev->tx_mutex);
        ol_txrx_tso_stats_deinit(pdev);
        ol_txrx_fw_stats_desc_pool_deinit(pdev);
        qdf_mem_free(pdev);

fail0:
        return NULL;
}

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
/**
 * htt_pkt_log_init() - API to initialize packet log
 * @ppdev: pdev handle
 * @scn: HIF context
 *
 * Return: void
 */
void htt_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
        struct ol_txrx_pdev_t *handle = (struct ol_txrx_pdev_t *)ppdev;

        if (handle->pkt_log_init)
                return;

        if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
            !QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
                pktlog_sethandle(&handle->pl_dev, scn);
                pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
                if (pktlogmod_init(scn))
                        qdf_print("pktlogmod_init failed");
                else
                        handle->pkt_log_init = true;
        }
}

/**
 * htt_pktlogmod_exit() - API to clean up pktlog info
 * @handle: pdev handle
 *
 * Return: void
 */
static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
{
        if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
            !QDF_IS_EPPING_ENABLED(cds_get_conparam()) &&
            handle->pkt_log_init) {
                pktlogmod_exit(handle);
                handle->pkt_log_init = false;
        }
}

#else
void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *ol_sc) { }
static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
#endif

#ifdef QCA_LL_PDEV_TX_FLOW_CONTROL
/**
 * ol_txrx_pdev_set_threshold() - set pdev pool stop/start threshold
 * @pdev: txrx pdev
 *
 * Return: void
 */
static void ol_txrx_pdev_set_threshold(struct ol_txrx_pdev_t *pdev)
{
        uint32_t stop_threshold;
        uint32_t start_threshold;
        uint16_t desc_pool_size = pdev->tx_desc.pool_size;

        stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
        start_threshold = stop_threshold +
                ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
        pdev->tx_desc.start_th = (start_threshold * desc_pool_size) / 100;
        pdev->tx_desc.stop_th = (stop_threshold * desc_pool_size) / 100;
        pdev->tx_desc.stop_priority_th =
                (TX_PRIORITY_TH * pdev->tx_desc.stop_th) / 100;
        if (pdev->tx_desc.stop_priority_th >= MAX_TSO_SEGMENT_DESC)
                pdev->tx_desc.stop_priority_th -= MAX_TSO_SEGMENT_DESC;

        pdev->tx_desc.start_priority_th =
                (TX_PRIORITY_TH * pdev->tx_desc.start_th) / 100;
        if (pdev->tx_desc.start_priority_th >= MAX_TSO_SEGMENT_DESC)
                pdev->tx_desc.start_priority_th -= MAX_TSO_SEGMENT_DESC;
        pdev->tx_desc.status = FLOW_POOL_ACTIVE_UNPAUSED;
}
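
/*
 * Worked example of the threshold arithmetic above, with illustrative
 * values only: assume desc_pool_size = 1024, a configured stop threshold
 * of 15 (percent), a start offset of 10, TX_PRIORITY_TH = 80 and
 * MAX_TSO_SEGMENT_DESC = 44 (the last two values are assumptions for
 * this sketch, not taken from this file). Then:
 *   stop_th  = (15 * 1024) / 100 = 153
 *   start_th = ((15 + 10) * 1024) / 100 = 256
 *   stop_priority_th  = (80 * 153) / 100 = 122, minus 44 -> 78
 *   start_priority_th = (80 * 256) / 100 = 204, minus 44 -> 160
 * i.e. the priority thresholds sit below the regular ones, so
 * high-priority (e.g. TSO) traffic is paused later than regular traffic.
 */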
#else
static inline void ol_txrx_pdev_set_threshold(struct ol_txrx_pdev_t *pdev)
{
}
#endif

/**
 * ol_txrx_pdev_post_attach() - attach txrx pdev
 * @ppdev: txrx pdev
 *
 * Return: 0 for success
 */
int
ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
        uint16_t i;
        uint16_t fail_idx = 0;
        int ret = 0;
        uint16_t desc_pool_size;
        struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);

        uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
        union ol_tx_desc_list_elem_t *c_element;
        unsigned int sig_bit;
        uint16_t desc_per_page;

        if (!osc) {
                ret = -EINVAL;
                goto ol_attach_fail;
        }

        /*
         * For LL, limit the number of host's tx descriptors to match
         * the number of target FW tx descriptors.
         * This simplifies the FW, by ensuring the host will never
         * download more tx descriptors than the target has space for.
         * The FW will drop/free low-priority tx descriptors when it
         * starts to run low, so that in theory the host should never
         * run out of tx descriptors.
         */

        /*
         * LL - initialize the target credit ourselves.
         * HL - wait for a HTT target credit initialization
         * during htt_attach.
         */
        desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
        ol_tx_init_pdev(pdev);

        ol_tx_desc_dup_detect_init(pdev, desc_pool_size);

        ol_tx_setup_fastpath_ce_handles(osc, pdev);

        if ((ol_txrx_get_new_htt_msg_format(pdev)))
                ol_set_cfg_new_htt_format(pdev->ctrl_pdev, true);
        else
                ol_set_cfg_new_htt_format(pdev->ctrl_pdev, false);

        ret = htt_attach(pdev->htt_pdev, desc_pool_size);
        if (ret)
                goto htt_attach_fail;

        /* Attach micro controller data path offload resource */
        if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
                ret = htt_ipa_uc_attach(pdev->htt_pdev);
                if (ret)
                        goto uc_attach_fail;
        }

        /* Calculate single element reserved size power of 2 */
        pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
        qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
                pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
        if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
            (!pdev->tx_desc.desc_pages.cacheable_pages)) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "Page alloc fail");
                ret = -ENOMEM;
                goto page_alloc_fail;
        }
        desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
        pdev->tx_desc.offset_filter = desc_per_page - 1;
        /* Calculate page divider to find page number */
        sig_bit = 0;
        while (desc_per_page) {
                sig_bit++;
                desc_per_page = desc_per_page >> 1;
        }
        pdev->tx_desc.page_divider = (sig_bit - 1);
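        /*
         * Worked example: with num_element_per_page = 16 (an illustrative
         * value; the real count comes from the page allocation above),
         * offset_filter = 15 and the loop leaves sig_bit = 5, so
         * page_divider = 4; descriptor i is then found on page (i >> 4)
         * at offset (i & 15).
         */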
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                  "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
                  pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
                  desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
                  pdev->tx_desc.desc_pages.num_element_per_page);

        /*
         * Each SW tx desc (used only within the tx datapath SW) has a
         * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
         * Go ahead and allocate the HTT tx desc and link it with the SW tx
         * desc now, to avoid doing it during time-critical transmit.
         */
        pdev->tx_desc.pool_size = desc_pool_size;
        pdev->tx_desc.freelist =
                (union ol_tx_desc_list_elem_t *)
                (*pdev->tx_desc.desc_pages.cacheable_pages);
        c_element = pdev->tx_desc.freelist;
        for (i = 0; i < desc_pool_size; i++) {
                void *htt_tx_desc;
                void *htt_frag_desc = NULL;
                qdf_dma_addr_t frag_paddr = 0;
                qdf_dma_addr_t paddr;

                if (i == (desc_pool_size - 1))
                        c_element->next = NULL;
                else
                        c_element->next = (union ol_tx_desc_list_elem_t *)
                                ol_tx_desc_find(pdev, i + 1);

                htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
                if (!htt_tx_desc) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
                                  "%s: failed to alloc HTT tx desc (%d of %d)",
                                  __func__, i, desc_pool_size);
                        fail_idx = i;
                        ret = -ENOMEM;
                        goto desc_alloc_fail;
                }

                c_element->tx_desc.htt_tx_desc = htt_tx_desc;
                c_element->tx_desc.htt_tx_desc_paddr = paddr;
                ret = htt_tx_frag_alloc(pdev->htt_pdev,
                                        i, &frag_paddr, &htt_frag_desc);
                if (ret) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                                  "%s: failed to alloc HTT frag desc (%d/%d)",
                                  __func__, i, desc_pool_size);
                        /* Is there a leak here, is this handling correct? */
                        fail_idx = i;
                        goto desc_alloc_fail;
                }
                if (!ret && htt_frag_desc) {
                        /*
                         * Initialize the first 6 words (TSO flags)
                         * of the frag descriptor
                         */
                        memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
                        c_element->tx_desc.htt_frag_desc = htt_frag_desc;
                        c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
                }
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
                c_element->tx_desc.pkt_type = 0xff;
#ifdef QCA_COMPUTE_TX_DELAY
                c_element->tx_desc.entry_timestamp_ticks =
                        0xffffffff;
#endif
#endif
                c_element->tx_desc.id = i;
                qdf_atomic_init(&c_element->tx_desc.ref_cnt);
                c_element = c_element->next;
                fail_idx = i;
        }

        /* link SW tx descs into a freelist */
        pdev->tx_desc.num_free = desc_pool_size;
        ol_txrx_dbg("first tx_desc:0x%pK Last tx desc:0x%pK",
                    (uint32_t *)pdev->tx_desc.freelist,
                    (uint32_t *)(pdev->tx_desc.freelist + desc_pool_size));

        ol_txrx_pdev_set_threshold(pdev);

        /* check what format of frames are expected to be delivered by the OS */
        pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
        if (pdev->frame_format == wlan_frm_fmt_native_wifi)
                pdev->htt_pkt_type = htt_pkt_type_native_wifi;
        else if (pdev->frame_format == wlan_frm_fmt_802_3) {
                if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
                        pdev->htt_pkt_type = htt_pkt_type_eth2;
                else
                        pdev->htt_pkt_type = htt_pkt_type_ethernet;
        } else {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s Invalid standard frame type: %d",
                          __func__, pdev->frame_format);
                ret = -EINVAL;
                goto control_init_fail;
        }

        /* setup the global rx defrag waitlist */
        TAILQ_INIT(&pdev->rx.defrag.waitlist);

        /* configure where defrag timeout and duplicate detection is handled */
        pdev->rx.flags.defrag_timeout_check =
                pdev->rx.flags.dup_check =
                ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
        /* Need to revisit this part. Currently, hardcoded to Riva's caps */
        pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
        pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
        /*
         * The Riva HW de-aggregate doesn't have capability to generate 802.11
         * header for non-first subframe of A-MSDU.
         */
        pdev->sw_subfrm_hdr_recovery_enable = 1;
        /*
         * The Riva HW doesn't have the capability to set Protected Frame bit
         * in the MAC header for encrypted data frame.
         */
        pdev->sw_pf_proc_enable = 1;

        if (pdev->frame_format == wlan_frm_fmt_802_3) {
                /*
                 * sw llc process is only needed in
                 * 802.3 to 802.11 transform case
                 */
                pdev->sw_tx_llc_proc_enable = 1;
                pdev->sw_rx_llc_proc_enable = 1;
        } else {
                pdev->sw_tx_llc_proc_enable = 0;
                pdev->sw_rx_llc_proc_enable = 0;
        }

        switch (pdev->frame_format) {
        case wlan_frm_fmt_raw:
                pdev->sw_tx_encap =
                        pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
                        ? 0 : 1;
                pdev->sw_rx_decap =
                        pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
                        ? 0 : 1;
                break;
        case wlan_frm_fmt_native_wifi:
                pdev->sw_tx_encap =
                        pdev->
                        target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
                        ? 0 : 1;
                pdev->sw_rx_decap =
                        pdev->
                        target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
                        ? 0 : 1;
                break;
        case wlan_frm_fmt_802_3:
                pdev->sw_tx_encap =
                        pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
                        ? 0 : 1;
                pdev->sw_rx_decap =
                        pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
                        ? 0 : 1;
                break;
        default:
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
                          pdev->frame_format,
                          pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
                ret = -EINVAL;
                goto control_init_fail;
        }
#endif

        /*
         * Determine what rx processing steps are done within the host.
         * Possibilities:
         * 1.  Nothing - rx->tx forwarding and rx PN entirely within target.
         *     (This is unlikely; even if the target is doing rx->tx forwarding,
         *     the host should be doing rx->tx forwarding too, as a back up for
         *     the target's rx->tx forwarding, in case the target runs short on
         *     memory, and can't store rx->tx frames that are waiting for
         *     missing prior rx frames to arrive.)
         * 2.  Just rx -> tx forwarding.
         *     This is the typical configuration for HL, and a likely
         *     configuration for LL STA or small APs (e.g. retail APs).
         * 3.  Both PN check and rx -> tx forwarding.
         *     This is the typical configuration for large LL APs.
         *     Host-side PN check without rx->tx forwarding is not a valid
         *     configuration, since the PN check needs to be done prior to
         *     the rx->tx forwarding.
         */
        if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
                /*
                 * PN check, rx-tx forwarding and rx reorder is done by
                 * the target
                 */
                if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
                        pdev->rx_opt_proc = ol_rx_in_order_deliver;
                else
                        pdev->rx_opt_proc = ol_rx_fwd_check;
        } else {
                if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
                        if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
                                /*
                                 * PN check done on host,
                                 * rx->tx forwarding not done at all.
                                 */
                                pdev->rx_opt_proc = ol_rx_pn_check_only;
                        } else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
                                /*
                                 * Both PN check and rx->tx forwarding done
                                 * on host.
                                 */
                                pdev->rx_opt_proc = ol_rx_pn_check;
                        } else {
#define TRACESTR01 "invalid config: if rx PN check is on the host, " \
"rx->tx forwarding check needs to also be on the host"
                                QDF_TRACE(QDF_MODULE_ID_TXRX,
                                          QDF_TRACE_LEVEL_ERROR,
                                          "%s: %s", __func__, TRACESTR01);
#undef TRACESTR01
                                ret = -EINVAL;
                                goto control_init_fail;
                        }
                } else {
                        /* PN check done on target */
                        if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
                            ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
                                /*
                                 * rx->tx forwarding done on host (possibly as
                                 * back-up for target-side primary rx->tx
                                 * forwarding)
                                 */
                                pdev->rx_opt_proc = ol_rx_fwd_check;
                        } else {
                                /*
                                 * rx->tx forwarding either done in target,
                                 * or not done at all
                                 */
                                pdev->rx_opt_proc = ol_rx_deliver;
                        }
                }
        }
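
        /*
         * Summary of the rx_opt_proc selection above:
         *   full reorder offload, rx->tx fwd disabled -> ol_rx_in_order_deliver
         *   full reorder offload, rx->tx fwd enabled  -> ol_rx_fwd_check
         *   host PN check, fwd disabled               -> ol_rx_pn_check_only
         *   host PN check, fwd check                  -> ol_rx_pn_check
         *   target PN check, host fwd check           -> ol_rx_fwd_check
         *   otherwise                                 -> ol_rx_deliver
         */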
1212
1213 /* initialize mutexes for tx desc alloc and peer lookup */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301214 qdf_spinlock_create(&pdev->peer_ref_mutex);
1215 qdf_spinlock_create(&pdev->rx.mutex);
1216 qdf_spinlock_create(&pdev->last_real_peer_mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001217 qdf_spinlock_create(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001218 OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
1219
Yun Parkf01f6e22017-01-18 17:27:02 -08001220 if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK) {
1221 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001222 goto reorder_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001223 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001224
Yun Parkf01f6e22017-01-18 17:27:02 -08001225 if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK) {
1226 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001227 goto pn_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001228 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001229
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001230 /*
1231 * WDI event attach
1232 */
1233 wdi_event_attach(pdev);
1234
1235 /*
1236 * Initialize rx PN check characteristics for different security types.
1237 */
hangtian127c9532019-01-12 13:29:07 +08001238 qdf_mem_zero(&pdev->rx_pn[0], sizeof(pdev->rx_pn));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001239
1240 /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
1241 pdev->rx_pn[htt_sec_type_tkip].len =
1242 pdev->rx_pn[htt_sec_type_tkip_nomic].len =
1243 pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
1244 pdev->rx_pn[htt_sec_type_tkip].cmp =
1245 pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
1246 pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
1247
1248 /* WAPI: 128-bit PN */
1249 pdev->rx_pn[htt_sec_type_wapi].len = 128;
1250 pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
1251
1252 OL_RX_REORDER_TIMEOUT_INIT(pdev);
1253
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001254 ol_txrx_dbg("Created pdev %pK\n", pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001255
1256 pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
1257
1258#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
1259#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
1260
1261/* #if 1 -- TODO: clean this up */
1262#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
1263 /* avg = 100% * new + 0% * old */ \
1264 (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
1265/*
Yun Parkeaea8632017-04-09 09:53:45 -07001266 * #else
1267 * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
1268 * //avg = 25% * new + 75% * old
1269 * (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
1270 * #endif
1271 */
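/*
 * Illustrative arithmetic, assuming the usual weighted-average form
 * avg = (new * weight + old * ((1 << shift) - weight)) >> shift
 * (the combining itself is done elsewhere, not in this section):
 * with shift = 3 and weight = 8, avg = (8 * new + 0 * old) >> 3, i.e.
 * 100% new; with the alternative weight = 2 above,
 * avg = (2 * new + 6 * old) >> 3, i.e. 25% new + 75% old.
 */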
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001272 pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
1273 pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
1274#endif
1275
1276 ol_txrx_local_peer_id_pool_init(pdev);
1277
1278 pdev->cfg.ll_pause_txq_limit =
1279 ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
1280
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301281 /* TX flow control for peer who is in very bad link status */
1282 ol_tx_badpeer_flow_cl_init(pdev);
1283
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001284#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301285 qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301286 qdf_spinlock_create(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001287
1288 /* initialize compute interval with 5 seconds (ESE default) */
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301289 pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001290 {
1291 uint32_t bin_width_1000ticks;
Yun Parkeaea8632017-04-09 09:53:45 -07001292
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001293 bin_width_1000ticks =
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301294 qdf_system_msecs_to_ticks
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001295 (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
1296 * 1000);
1297 /*
1298 * Compute a factor and shift that together are equal to the
1299 * inverse of the bin_width time, so that rather than dividing
1300 * by the bin width time, approximately the same result can be
1301 * obtained much more efficiently by a multiply + shift.
1302 * multiply_factor >> shift = 1 / bin_width_time, so
1303 * multiply_factor = (1 << shift) / bin_width_time.
1304 *
1305 * Pick the shift semi-arbitrarily.
1306 * If we knew statically what the bin_width would be, we could
1307 * choose a shift that minimizes the error.
1308 * Since the bin_width is determined dynamically, simply use a
1309 * shift that is about half of the uint32_t size. This should
1310 * result in a relatively large multiplier value, which
1311 * minimizes error from rounding the multiplier to an integer.
1312 * The rounding error only becomes significant if the tick units
1313 * are on the order of 1 microsecond. In most systems, it is
1314 * expected that the tick units will be relatively low-res,
1315 * on the order of 1 millisecond. In such systems the rounding
1316 * error is negligible.
1317 * It would be more accurate to dynamically try out different
1318 * shifts and choose the one that results in the smallest
1319 * rounding error, but that extra level of fidelity is
1320 * not needed.
1321 */
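	/*
	 * Worked example (illustrative only, assuming 1 ms ticks and a
	 * hypothetical 10 ms internal bin width): bin_width_1000ticks =
	 * msecs_to_ticks(10 * 1000) = 10000, so with shift = 16 the
	 * multiplier is ((1 << 16) * 1000 + 5000) / 10000 = 6554. A delay
	 * of 50 ticks (50 ms) then maps to (50 * 6554) >> 16 = bin 5,
	 * matching 50 ms / 10 ms.
	 */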
1322 pdev->tx_delay.hist_internal_bin_width_shift = 16;
1323 pdev->tx_delay.hist_internal_bin_width_mult =
1324 ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
1325 1000 + (bin_width_1000ticks >> 1)) /
1326 bin_width_1000ticks;
1327 }
1328#endif /* QCA_COMPUTE_TX_DELAY */
1329
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001330 /* Thermal Mitigation */
1331 ol_tx_throttle_init(pdev);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001332
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001333 ol_tso_seg_list_init(pdev, desc_pool_size);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001334
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301335 ol_tso_num_seg_list_init(pdev, desc_pool_size);
1336
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001337 ol_tx_register_flow_control(pdev);
1338
1339 return 0; /* success */
1340
Leo Chang376398b2015-10-23 14:19:02 -07001341pn_trace_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001342 OL_RX_REORDER_TRACE_DETACH(pdev);
1343
Leo Chang376398b2015-10-23 14:19:02 -07001344reorder_trace_attach_fail:
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301345 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1346 qdf_spinlock_destroy(&pdev->rx.mutex);
1347 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301348 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001349 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1350
Leo Chang376398b2015-10-23 14:19:02 -07001351control_init_fail:
1352desc_alloc_fail:
1353 for (i = 0; i < fail_idx; i++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001354 htt_tx_desc_free(pdev->htt_pdev,
Leo Chang376398b2015-10-23 14:19:02 -07001355 (ol_tx_desc_find(pdev, i))->htt_tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001356
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301357 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001358 &pdev->tx_desc.desc_pages, 0, true);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001359
Leo Chang376398b2015-10-23 14:19:02 -07001360page_alloc_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001361 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1362 htt_ipa_uc_detach(pdev->htt_pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001363uc_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001364 htt_detach(pdev->htt_pdev);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301365htt_attach_fail:
1366 ol_tx_desc_dup_detect_deinit(pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001367ol_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001368 return ret; /* fail */
1369}
1370
Dhanashri Atre12a08392016-02-17 13:10:34 -08001371/**
1372 * ol_txrx_pdev_attach_target() - send target configuration
1373 *
1374 * @ppdev - the physical device being initialized
1375 *
1376 * The majority of the data SW setup is done by the pdev_attach
1377 * functions, but this function completes the data SW setup by
1378 * sending datapath configuration messages to the target.
1379 *
1380 * Return: 0 on success, 1 on failure
1381 */
Rajeev Kumar Sirasanagandlaed4d1b32019-01-29 11:08:20 -08001382static int ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001383{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001384 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001385
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +05301386 return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0 : 1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001387}
1388
Dhanashri Atre12a08392016-02-17 13:10:34 -08001389/**
Mohit Khanna54f3a382017-03-13 17:56:32 -07001390 * ol_tx_free_descs_inuse - free tx descriptors which are in use
1391 * @pdev - the physical device for which tx descs need to be freed
1392 *
1393 * Cycle through the pdev's list of TX descriptors which are still in use,
1394 * i.e. for which TX completion has not been received, and free them. Should
1395 * be called only when interrupts are off and all lower layer RX is stopped.
1396 * Otherwise there may be a race condition with TX completions.
1397 *
1398 * Return: None
1399 */
1400static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
1401{
1402 int i;
1403 void *htt_tx_desc;
1404 struct ol_tx_desc_t *tx_desc;
1405 int num_freed_tx_desc = 0;
1406
1407 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1408 tx_desc = ol_tx_desc_find(pdev, i);
1409 /*
1410 * Confirm that each tx descriptor is "empty", i.e. it has
1411 * no tx frame attached.
1412 * In particular, check that there are no frames that have
1413 * been given to the target to transmit, for which the
1414 * target has never provided a response.
1415 */
1416 if (qdf_atomic_read(&tx_desc->ref_cnt)) {
1417 ol_txrx_dbg("Warning: freeing tx frame (no completion)");
1418 ol_tx_desc_frame_free_nonstd(pdev,
1419 tx_desc, 1);
1420 num_freed_tx_desc++;
1421 }
1422 htt_tx_desc = tx_desc->htt_tx_desc;
1423 htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
1424 }
1425
1426 if (num_freed_tx_desc)
1427 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1428 "freed %d tx frames for which no resp from target",
1429 num_freed_tx_desc);
1430
1431}
1432
1433/**
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301434 * ol_txrx_pdev_pre_detach() - detach the data SW state
Dhanashri Atre12a08392016-02-17 13:10:34 -08001435 * @ppdev - the data physical device object being removed
1436 * @force - delete the pdev (and its vdevs and peers) even if
1437 * there are outstanding references by the target to the vdevs
1438 * and peers within the pdev
1439 *
1440 * This function is used when the WLAN driver is being removed to
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301441 * detach the host data component within the driver.
Dhanashri Atre12a08392016-02-17 13:10:34 -08001442 *
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301443 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08001444 */
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301445static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001446{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001447 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001448
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001449 /* preconditions */
1450 TXRX_ASSERT2(pdev);
1451
1452 /* check that the pdev has no vdevs allocated */
1453 TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
1454
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001455#ifdef QCA_SUPPORT_TX_THROTTLE
1456 /* Thermal Mitigation */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301457 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
1458 qdf_timer_free(&pdev->tx_throttle.phase_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001459#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301460 qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1461 qdf_timer_free(&pdev->tx_throttle.tx_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001462#endif
1463#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001464
1465 if (force) {
1466 /*
1467 * The assertion above confirms that all vdevs within this pdev
1468 * were detached. However, they may not have actually been
1469 * deleted.
1470 * If the vdev had peers which never received a PEER_UNMAP msg
1471 * from the target, then there are still zombie peer objects,
1472 * and the vdev parents of the zombie peers are also zombies,
1473 * hanging around until their final peer gets deleted.
1474 * Go through the peer hash table and delete any peers left.
1475 * As a side effect, this will complete the deletion of any
1476 * vdevs that are waiting for their peers to finish deletion.
1477 */
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001478 ol_txrx_dbg("Force delete for pdev %pK\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001479 pdev);
1480 ol_txrx_peer_find_hash_erase(pdev);
1481 }
1482
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301483 /* to get flow pool status before freeing descs */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07001484 ol_tx_dump_flow_pool_info((void *)pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07001485 ol_tx_free_descs_inuse(pdev);
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301486 ol_tx_deregister_flow_control(pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07001487
1488 /*
1489 * ol_tso_seg_list_deinit should happen after
1490 * ol_tx_free_descs_inuse, since the latter accesses the tso seg freelist
1491 * which is being de-initialized by ol_tso_seg_list_deinit
1492 */
1493 ol_tso_seg_list_deinit(pdev);
1494 ol_tso_num_seg_list_deinit(pdev);
1495
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301496 /* Stop the communication between HTT and target at first */
1497 htt_detach_target(pdev->htt_pdev);
1498
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301499 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001500 &pdev->tx_desc.desc_pages, 0, true);
1501 pdev->tx_desc.freelist = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001502
1503 /* Detach micro controller data path offload resource */
1504 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1505 htt_ipa_uc_detach(pdev->htt_pdev);
1506
1507 htt_detach(pdev->htt_pdev);
Nirav Shah76291962016-04-25 10:50:37 +05301508 ol_tx_desc_dup_detect_deinit(pdev);
1509
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301510 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1511 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
1512 qdf_spinlock_destroy(&pdev->rx.mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001513 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001514#ifdef QCA_SUPPORT_TX_THROTTLE
1515 /* Thermal Mitigation */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301516 qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001517#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301518
1519 /* TX flow control for peer who is in very bad link status */
1520 ol_tx_badpeer_flow_cl_deinit(pdev);
1521
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001522 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1523
1524 OL_RX_REORDER_TRACE_DETACH(pdev);
1525 OL_RX_PN_TRACE_DETACH(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301526
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001527 /*
1528 * WDI event detach
1529 */
1530 wdi_event_detach(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301531
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001532 ol_txrx_local_peer_id_cleanup(pdev);
1533
1534#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301535 qdf_spinlock_destroy(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001536#endif
1537}
1538
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301539/**
1540 * ol_txrx_pdev_detach() - delete the data SW state
1541 * @ppdev - the data physical device object being removed
1542 * @force - delete the pdev (and its vdevs and peers) even if
1543 * there are outstanding references by the target to the vdevs
1544 * and peers within the pdev
1545 *
1546 * This function is used when the WLAN driver is being removed to
1547 * remove the host data component within the driver.
1548 * All virtual devices within the physical device need to be deleted
1549 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
1550 *
1551 * Return: None
1552 */
1553static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
1554{
1555 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Rakesh Pillai33942c42018-05-09 11:45:38 +05301556 struct ol_txrx_stats_req_internal *req, *temp_req;
tfyu9fcabd72017-09-26 17:46:48 +08001557 int i = 0;
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301558
1559 /* check to ensure the txrx pdev structure is not NULL */
1560 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05301561 ol_txrx_err("pdev is NULL");
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301562 return;
1563 }
1564
1565 htt_pktlogmod_exit(pdev);
1566
tfyu9fcabd72017-09-26 17:46:48 +08001567 qdf_spin_lock_bh(&pdev->req_list_spinlock);
1568 if (pdev->req_list_depth > 0)
1569 ol_txrx_err(
1570 "Warning: the txrx req list is not empty, depth=%d\n",
1571 pdev->req_list_depth
1572 );
Rakesh Pillai33942c42018-05-09 11:45:38 +05301573 TAILQ_FOREACH_SAFE(req, &pdev->req_list, req_list_elem, temp_req) {
tfyu9fcabd72017-09-26 17:46:48 +08001574 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
1575 pdev->req_list_depth--;
1576 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05301577 "%d: %pK,verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)\n",
tfyu9fcabd72017-09-26 17:46:48 +08001578 i++,
1579 req,
1580 req->base.print.verbose,
1581 req->base.print.concise,
1582 req->base.stats_type_upload_mask,
1583 req->base.stats_type_reset_mask
1584 );
1585 qdf_mem_free(req);
1586 }
1587 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
1588
1589 qdf_spinlock_destroy(&pdev->req_list_spinlock);
Ajit Pal Singh8184e932018-07-25 13:54:13 +05301590 qdf_spinlock_destroy(&pdev->tx_mutex);
tfyu9fcabd72017-09-26 17:46:48 +08001591
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301592 OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
1593
1594 if (pdev->cfg.is_high_latency)
1595 ol_tx_sched_detach(pdev);
1596
1597 htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
1598
1599 htt_pdev_free(pdev->htt_pdev);
1600 ol_txrx_peer_find_detach(pdev);
Manjunathappa Prakash8b686632019-01-15 22:07:54 -08001601 qdf_flush_work(&pdev->peer_unmap_timer_work);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301602 ol_txrx_tso_stats_deinit(pdev);
jitiphil335d2412018-06-07 22:49:24 +05301603 ol_txrx_fw_stats_desc_pool_deinit(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301604
1605 ol_txrx_pdev_txq_log_destroy(pdev);
1606 ol_txrx_pdev_grp_stat_destroy(pdev);
Alok Kumarddd457e2018-04-09 13:51:42 +05301607
Rakshith Suresh Patkar44f6a8f2018-04-17 16:17:12 +05301608 ol_txrx_debugfs_exit(pdev);
1609
Alok Kumarddd457e2018-04-09 13:51:42 +05301610 qdf_mem_free(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301611}
1612
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301613#if defined(QCA_HL_NETDEV_FLOW_CONTROL)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301614
1615/**
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301616 * ol_txrx_vdev_per_vdev_tx_desc_init() - initialize per-vdev tx desc count
1617 * related variables.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301618 * @vdev: the virtual device object
1619 *
1620 * Return: None
1621 */
1622static inline void
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301623ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301624{
1625 qdf_atomic_init(&vdev->tx_desc_count);
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301626 vdev->tx_desc_limit = 0;
1627 vdev->queue_restart_th = 0;
1628 vdev->prio_q_paused = 0;
1629 vdev->queue_stop_th = 0;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301630}
1631#else
1632
1633static inline void
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301634ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301635{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301636}
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301637#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301638
Dhanashri Atre12a08392016-02-17 13:10:34 -08001639/**
1640 * ol_txrx_vdev_attach - Allocate and initialize the data object
1641 * for a new virtual device.
1642 *
1643 * @ppdev - the physical device the virtual device belongs to
1644 * @vdev_mac_addr - the MAC address of the virtual device
1645 * @vdev_id - the ID used to identify the virtual device to the target
1646 * @op_mode - whether this virtual device is operating as an AP,
1647 * an IBSS, or a STA
1648 *
1649 * Return: success: handle to new data vdev object, failure: NULL
1650 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001651static struct cdp_vdev *
1652ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001653 uint8_t *vdev_mac_addr,
1654 uint8_t vdev_id, enum wlan_op_mode op_mode)
1655{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001656 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001657 struct ol_txrx_vdev_t *vdev;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001658 QDF_STATUS qdf_status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001659
1660 /* preconditions */
1661 TXRX_ASSERT2(pdev);
1662 TXRX_ASSERT2(vdev_mac_addr);
1663
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301664 vdev = qdf_mem_malloc(sizeof(*vdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001665 if (!vdev)
1666 return NULL; /* failure */
1667
1668 /* store provided params */
1669 vdev->pdev = pdev;
1670 vdev->vdev_id = vdev_id;
1671 vdev->opmode = op_mode;
1672
1673 vdev->delete.pending = 0;
1674 vdev->safemode = 0;
1675 vdev->drop_unenc = 1;
1676 vdev->num_filters = 0;
Himanshu Agarwal5ac2f7b2016-05-06 20:08:10 +05301677 vdev->fwd_tx_packets = 0;
1678 vdev->fwd_rx_packets = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001679
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301680 ol_txrx_vdev_per_vdev_tx_desc_init(vdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301681
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301682 qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
Srinivas Girigowdaa47b45f2019-02-27 12:29:02 -08001683 QDF_MAC_ADDR_SIZE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001684
1685 TAILQ_INIT(&vdev->peer_list);
1686 vdev->last_real_peer = NULL;
1687
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001688 ol_txrx_hl_tdls_flag_reset((struct cdp_vdev *)vdev, false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301689
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001690#ifdef QCA_IBSS_SUPPORT
1691 vdev->ibss_peer_num = 0;
1692 vdev->ibss_peer_heart_beat_timer = 0;
1693#endif
1694
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301695 ol_txrx_vdev_txqs_init(vdev);
1696
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301697 qdf_spinlock_create(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001698 vdev->ll_pause.paused_reason = 0;
1699 vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
1700 vdev->ll_pause.txq.depth = 0;
wadesong5e2e8012017-08-21 16:56:03 +08001701 qdf_atomic_init(&vdev->delete.detaching);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301702 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001703 &vdev->ll_pause.timer,
1704 ol_tx_vdev_ll_pause_queue_send, vdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301705 QDF_TIMER_TYPE_SW);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301706 qdf_atomic_init(&vdev->os_q_paused);
1707 qdf_atomic_set(&vdev->os_q_paused, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001708 vdev->tx_fl_lwm = 0;
1709 vdev->tx_fl_hwm = 0;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001710 vdev->rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001711 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Abhishek Singh217d9782017-04-28 23:49:11 +05301712 qdf_mem_zero(&vdev->last_peer_mac_addr,
1713 sizeof(union ol_txrx_align_mac_addr_t));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301714 qdf_spinlock_create(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001715 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08001716 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001717 vdev->osif_fc_ctx = NULL;
1718
Alok Kumar75355aa2018-03-19 17:32:58 +05301719 vdev->txrx_stats.txack_success = 0;
1720 vdev->txrx_stats.txack_failed = 0;
1721
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001722 /* Default MAX Q depth for every VDEV */
1723 vdev->ll_pause.max_q_depth =
1724 ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001725 qdf_status = qdf_event_create(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001726 /* add this vdev into the pdev's list */
1727 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
chenguo2201c0a2018-11-15 18:07:41 +08001728 if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam())
1729 pdev->monitor_vdev = vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001730
Poddar, Siddarth14521792017-03-14 21:19:42 +05301731 ol_txrx_dbg(
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07001732 "Created vdev %pK ("QDF_MAC_ADDR_STR")\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001733 vdev,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07001734 QDF_MAC_ADDR_ARRAY(vdev->mac_addr.raw));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001735
1736 /*
1737 * We've verified that htt_op_mode == wlan_op_mode,
1738 * so no translation is needed.
1739 */
1740 htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
1741
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001742 return (struct cdp_vdev *)vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001743}
1744
Dhanashri Atre12a08392016-02-17 13:10:34 -08001745/**
1746 * ol_txrx_vdev_register - Link a vdev's data object with the
1747 * matching OS shim vdev object.
1748 *
1749 * @pvdev: the virtual device's data object
1750 * @osif_vdev: the virtual device's OS shim object
Sravan Kumar Kairam43f191b2018-05-04 17:00:39 +05301751 * @ctrl_vdev: UMAC vdev objmgr handle
Dhanashri Atre12a08392016-02-17 13:10:34 -08001752 * @txrx_ops: (pointers to) functions used for tx and rx data xfer
1753 *
1754 * The data object for a virtual device is created by the
1755 * function ol_txrx_vdev_attach. However, rather than fully
1756 * linking the data vdev object with the vdev objects from the
1757 * other subsystems that the data vdev object interacts with,
1758 * ol_txrx_vdev_attach focuses primarily on creating
1759 * the data vdev object. After the creation of both the data
1760 * vdev object and the OS shim vdev object, this
1761 * ol_txrx_vdev_register function is used to connect the two
1762 * vdev objects, so the data SW can use the OS shim vdev handle
1763 * when passing rx data received by a vdev up to the OS shim.
1764 */
Sravan Kumar Kairam43f191b2018-05-04 17:00:39 +05301765static void ol_txrx_vdev_register(struct cdp_vdev *pvdev, void *osif_vdev,
1766 struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
1767 struct ol_txrx_ops *txrx_ops)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001768{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001769 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001770
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001771 if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05301772 qdf_print("vdev/txrx_ops is NULL!");
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001773 qdf_assert(0);
1774 return;
1775 }
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001776
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001777 vdev->osif_dev = osif_vdev;
Sravan Kumar Kairam43f191b2018-05-04 17:00:39 +05301778 vdev->ctrl_vdev = ctrl_vdev;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001779 vdev->rx = txrx_ops->rx.rx;
Poddar, Siddarth3906e172018-01-09 11:24:58 +05301780 vdev->stats_rx = txrx_ops->rx.stats_rx;
Alok Kumar4696fb02018-06-06 00:10:18 +05301781 vdev->tx_comp = txrx_ops->tx.tx_comp;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001782 txrx_ops->tx.tx = ol_tx_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001783}
1784
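/*
 * Illustrative call sequence from an OS shim (a sketch only; my_shim_rx,
 * my_osif_dev and my_ctrl_vdev are hypothetical caller-side names, not
 * symbols defined in this file):
 *
 *   struct ol_txrx_ops ops = { 0 };
 *   struct cdp_vdev *vdev;
 *
 *   ops.rx.rx = my_shim_rx;
 *   vdev = ol_txrx_vdev_attach(ppdev, mac, vdev_id, wlan_op_mode_sta);
 *   ol_txrx_vdev_register(vdev, my_osif_dev, my_ctrl_vdev, &ops);
 *   // ol_txrx_vdev_register sets ops.tx.tx to ol_tx_data, which the
 *   // shim then calls to transmit frames
 */
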
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001785void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
1786{
1787 vdev->safemode = val;
1788}
1789
Dhanashri Atre12a08392016-02-17 13:10:34 -08001790/**
1791 * ol_txrx_set_privacy_filters - set the privacy filter
1792 * @vdev - the data virtual device object
1793 * @filter - filters to be set
1794 * @num - the number of filters
1795 *
1796 * Rx related. Set the privacy filters. When receiving rx packets,
1797 * check the ether type, filter type and packet type to decide
1798 * whether to discard those packets.
1799 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08001800static void
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001801ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
1802 void *filters, uint32_t num)
1803{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301804 qdf_mem_copy(vdev->privacy_filters, filters,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001805 num * sizeof(struct privacy_exemption));
1806 vdev->num_filters = num;
1807}
1808
1809void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
1810{
1811 vdev->drop_unenc = val;
1812}
1813
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001814#if defined(CONFIG_HL_SUPPORT) || defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
gbian016a42e2017-03-01 18:49:11 +08001815
1816static void
1817ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
1818{
1819 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1820 int i;
1821 struct ol_tx_desc_t *tx_desc;
1822
1823 qdf_spin_lock_bh(&pdev->tx_mutex);
1824 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1825 tx_desc = ol_tx_desc_find(pdev, i);
1826 if (tx_desc->vdev == vdev)
1827 tx_desc->vdev = NULL;
1828 }
1829 qdf_spin_unlock_bh(&pdev->tx_mutex);
1830}
1831
1832#else
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001833#ifdef QCA_LL_TX_FLOW_CONTROL_V2
1834static void ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
1835{
1836 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1837 struct ol_tx_flow_pool_t *pool;
1838 int i;
1839 struct ol_tx_desc_t *tx_desc;
gbian016a42e2017-03-01 18:49:11 +08001840
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001841 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
1842 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1843 tx_desc = ol_tx_desc_find(pdev, i);
1844 if (!qdf_atomic_read(&tx_desc->ref_cnt))
1845 /* not in use */
1846 continue;
1847
1848 pool = tx_desc->pool;
1849 qdf_spin_lock_bh(&pool->flow_pool_lock);
1850 if (tx_desc->vdev == vdev)
1851 tx_desc->vdev = NULL;
1852 qdf_spin_unlock_bh(&pool->flow_pool_lock);
1853 }
1854 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
1855}
1856
1857#else
gbian016a42e2017-03-01 18:49:11 +08001858static void
1859ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
1860{
gbian016a42e2017-03-01 18:49:11 +08001861}
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001862#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
1863#endif /* CONFIG_HL_SUPPORT */
gbian016a42e2017-03-01 18:49:11 +08001864
Dhanashri Atre12a08392016-02-17 13:10:34 -08001865/**
1866 * ol_txrx_vdev_detach - Deallocate the specified data virtual
1867 * device object.
1868 * @pvdev: data object for the virtual device in question
1869 * @callback: function to call (if non-NULL) once the vdev has
1870 * been wholly deleted
1871 * @callback_context: context to provide in the callback
1872 *
1873 * All peers associated with the virtual device need to be deleted
1874 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
1875 * However, for the peers to be fully deleted, the peer deletion has to
1876 * percolate through the target data FW and back up to the host data SW.
1877 * Thus, even though the host control SW may have issued a peer_detach
1878 * call for each of the vdev's peers, the peer objects may still be
1879 * allocated, pending removal of all references to them by the target FW.
1880 * In this case, though the vdev_detach function call will still return
1881 * immediately, the vdev itself won't actually be deleted, until the
1882 * deletions of all its peers complete.
1883 * The caller can provide a callback function pointer to be notified when
1884 * the vdev deletion actually happens - whether it's directly within the
1885 * vdev_detach call, or if it's deferred until all in-progress peer
1886 * deletions have completed.
1887 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08001888static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001889ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001890 ol_txrx_vdev_delete_cb callback, void *context)
1891{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001892 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
wadesong5e2e8012017-08-21 16:56:03 +08001893 struct ol_txrx_pdev_t *pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001894
1895 /* preconditions */
1896 TXRX_ASSERT2(vdev);
wadesong5e2e8012017-08-21 16:56:03 +08001897 pdev = vdev->pdev;
1898
1899 /* prevent anyone from restarting the ll_pause timer again */
1900 qdf_atomic_set(&vdev->delete.detaching, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001901
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301902 ol_txrx_vdev_tx_queue_free(vdev);
1903
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301904 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301905 qdf_timer_stop(&vdev->ll_pause.timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001906 vdev->ll_pause.is_q_timer_on = false;
1907 while (vdev->ll_pause.txq.head) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05301908 qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
Yun Parkeaea8632017-04-09 09:53:45 -07001909
Nirav Shahcbc6d722016-03-01 16:24:53 +05301910 qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
Nirav Shahcbc6d722016-03-01 16:24:53 +05301911 qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001912 vdev->ll_pause.txq.head = next;
1913 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301914 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
wadesong5e2e8012017-08-21 16:56:03 +08001915
1916 /* The ll_pause timer must be deleted without any locks held, and
1917 * no timer function should execute after this point, because
1918 * qdf_timer_free deletes the timer synchronously.
1919 */
1920 qdf_timer_free(&vdev->ll_pause.timer);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301921 qdf_spinlock_destroy(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001922
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301923 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001924 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08001925 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001926 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301927 qdf_spin_unlock_bh(&vdev->flow_control_lock);
1928 qdf_spinlock_destroy(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001929
1930 /* remove the vdev from its parent pdev's list */
1931 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
1932
1933 /*
1934 * Use peer_ref_mutex while accessing peer_list, in case
1935 * a peer is in the process of being removed from the list.
1936 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301937 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001938 /* check that the vdev has no peers allocated */
1939 if (!TAILQ_EMPTY(&vdev->peer_list)) {
1940 /* debug print - will be removed later */
Poddar, Siddarth14521792017-03-14 21:19:42 +05301941 ol_txrx_dbg(
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07001942 "not deleting vdev object %pK ("QDF_MAC_ADDR_STR") until deletion finishes for all its peers\n",
Nirav Shah7c8c1712018-09-10 16:01:31 +05301943 vdev,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07001944 QDF_MAC_ADDR_ARRAY(vdev->mac_addr.raw));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001945 /* indicate that the vdev needs to be deleted */
1946 vdev->delete.pending = 1;
1947 vdev->delete.callback = callback;
1948 vdev->delete.context = context;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301949 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001950 return;
1951 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301952 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001953 qdf_event_destroy(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001954
Poddar, Siddarth14521792017-03-14 21:19:42 +05301955 ol_txrx_dbg(
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07001956 "deleting vdev obj %pK ("QDF_MAC_ADDR_STR")\n",
Nirav Shah7c8c1712018-09-10 16:01:31 +05301957 vdev,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07001958 QDF_MAC_ADDR_ARRAY(vdev->mac_addr.raw));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001959
1960 htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
1961
1962 /*
Yun Parkeaea8632017-04-09 09:53:45 -07001963 * ol_tx_desc_free might access invalid contents of the vdev referred
1964 * to by a tx desc, since this vdev might be detached asynchronously
1965 * in another thread.
1966 *
1967 * Go through the tx desc pool and set each matching tx desc's vdev
1968 * pointer to NULL when detaching this vdev, and add a vdev check in
1969 * ol_tx_desc_free to avoid a crash.
1970 *
1971 */
gbian016a42e2017-03-01 18:49:11 +08001972 ol_txrx_tx_desc_reset_vdev(vdev);
1973
1974 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001975 * Doesn't matter if there are outstanding tx frames -
1976 * they will be freed once the target sends a tx completion
1977 * message for them.
1978 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301979 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001980 if (callback)
1981 callback(context);
1982}
1983
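/*
 * Illustrative teardown order (a sketch only; my_vdev_deleted and ctx
 * are hypothetical caller-side names):
 *
 *   // the control SW detaches every peer on the vdev first
 *   ol_txrx_peer_detach(...);
 *   // then detaches the vdev itself
 *   ol_txrx_vdev_detach(vdev, my_vdev_deleted, ctx);
 *   // my_vdev_deleted(ctx) runs synchronously if no peers remained,
 *   // or later, once the target confirms the last peer deletion
 */
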
1984/**
1985 * ol_txrx_flush_rx_frames() - flush cached rx frames
1986 * @peer: peer
1987 * @drop: set flag to drop frames
1988 *
1989 * Return: None
1990 */
1991void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301992 bool drop)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001993{
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001994 struct ol_txrx_cached_bufq_t *bufqi;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001995 struct ol_rx_cached_buf *cache_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301996 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001997 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001998
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301999 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
2000 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002001 return;
2002 }
2003
Dhanashri Atre182b0272016-02-17 15:35:07 -08002004 qdf_assert(peer->vdev);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302005 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002006 bufqi = &peer->bufq_info;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002007
Dhanashri Atre50141c52016-04-07 13:15:29 -07002008 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
Dhanashri Atre182b0272016-02-17 15:35:07 -08002009 data_rx = peer->vdev->rx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002010 else
2011 drop = true;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302012 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002013
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002014 qdf_spin_lock_bh(&bufqi->bufq_lock);
2015 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002016 typeof(*cache_buf), list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002017 while (!list_empty(&bufqi->cached_bufq)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002018 list_del(&cache_buf->list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002019 bufqi->curr--;
2020 qdf_assert(bufqi->curr >= 0);
2021 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002022 if (drop) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302023 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002024 } else {
2025 /* Flush the cached frames to HDD */
Dhanashri Atre182b0272016-02-17 15:35:07 -08002026 ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302027 if (ret != QDF_STATUS_SUCCESS)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302028 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002029 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302030 qdf_mem_free(cache_buf);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002031 qdf_spin_lock_bh(&bufqi->bufq_lock);
2032 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002033 typeof(*cache_buf), list);
2034 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002035 bufqi->qdepth_no_thresh = bufqi->curr;
2036 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302037 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002038}
2039
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07002040static void ol_txrx_flush_cache_rx_queue(void)
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302041{
2042 uint8_t sta_id;
2043 struct ol_txrx_peer_t *peer;
2044 struct ol_txrx_pdev_t *pdev;
2045
2046 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
2047 if (!pdev)
2048 return;
2049
2050 for (sta_id = 0; sta_id < WLAN_MAX_STA_COUNT; sta_id++) {
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002051 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
2052 sta_id);
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302053 if (!peer)
2054 continue;
2055 ol_txrx_flush_rx_frames(peer, 1);
2056 }
2057}
2058
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302059/* Define short name to use in cds_trigger_recovery */
2060#define PEER_DEL_TIMEOUT QDF_PEER_DELETION_TIMEDOUT
2061
Dhanashri Atre12a08392016-02-17 13:10:34 -08002062/**
Naveen Rawat17c42a82018-02-01 19:18:27 -08002063 * ol_txrx_dump_peer_access_list() - dump peer access list
2064 * @peer: peer handle
2065 *
2066 * This function will dump if any peer debug ids are still accessing peer
2067 *
2068 * Return: None
2069 */
2070static void ol_txrx_dump_peer_access_list(ol_txrx_peer_handle peer)
2071{
2072 u32 i;
2073 u32 pending_ref;
2074
2075 for (i = 0; i < PEER_DEBUG_ID_MAX; i++) {
2076 pending_ref = qdf_atomic_read(&peer->access_list[i]);
2077 if (pending_ref)
2078 ol_txrx_info_high("id %d pending refs %d",
2079 i, pending_ref);
2080 }
2081}
2082
2083/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08002084 * ol_txrx_peer_attach - Allocate and set up references for a
2085 * data peer object.
2086 * @pvdev - data virtual device object that will directly
2087 * own the data_peer object
2088 * @peer_mac_addr - MAC address of the new peer
2089 * @ctrl_peer - UMAC peer objmgr handle
2091 *
2092 * When an association with a peer starts, the host's control SW
2093 * uses this function to inform the host data SW.
2094 * The host data SW allocates its own peer object, and stores a
2095 * reference to the control peer object within the data peer object.
2096 * The host data SW also stores a reference to the virtual device
2097 * that the peer is associated with. This virtual device handle is
2098 * used when the data SW delivers rx data frames to the OS shim layer.
2099 * The host data SW returns a handle to the new peer data object,
2100 * so a reference within the control peer object can be set to the
2101 * data peer object.
2102 *
2103 * Return: handle to new data peer object, or NULL if the attach
2104 * fails
2105 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002106static void *
psimha8696f772018-04-03 17:38:38 -07002107ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr,
Sravan Kumar Kairamc273afd2018-05-28 12:12:28 +05302108 struct cdp_ctrl_objmgr_peer *ctrl_peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002109{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002110 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002111 struct ol_txrx_peer_t *peer;
2112 struct ol_txrx_peer_t *temp_peer;
2113 uint8_t i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002114 bool wait_on_deletion = false;
2115 QDF_STATUS rc;
Dhanashri Atre12a08392016-02-17 13:10:34 -08002116 struct ol_txrx_pdev_t *pdev;
Abhishek Singh217d9782017-04-28 23:49:11 +05302117 bool cmp_wait_mac = false;
2118 uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
Alok Kumare1977442018-11-28 17:16:03 +05302119 u8 check_valid = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002120
2121 /* preconditions */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002122 TXRX_ASSERT2(vdev);
2123 TXRX_ASSERT2(peer_mac_addr);
2124
Dhanashri Atre12a08392016-02-17 13:10:34 -08002125 pdev = vdev->pdev;
2126 TXRX_ASSERT2(pdev);
2127
Alok Kumare1977442018-11-28 17:16:03 +05302128 if (pdev->enable_peer_unmap_conf_support)
2129 check_valid = 1;
2130
Abhishek Singh217d9782017-04-28 23:49:11 +05302131 if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
2132 QDF_MAC_ADDR_SIZE))
2133 cmp_wait_mac = true;
2134
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302135 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002136 /* check for duplicate existing peer */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002137 TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2138 if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
Alok Kumare1977442018-11-28 17:16:03 +05302139 (union ol_txrx_align_mac_addr_t *)peer_mac_addr) &&
2140 (check_valid == 0 || temp_peer->valid)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302141 ol_txrx_info_high(
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002142 "vdev_id %d ("QDF_MAC_ADDR_STR") already exists.\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002143 vdev->vdev_id,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002144 QDF_MAC_ADDR_ARRAY(peer_mac_addr));
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302145 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002146 vdev->wait_on_peer_id = temp_peer->local_id;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002147 qdf_event_reset(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002148 wait_on_deletion = true;
Abhishek Singh217d9782017-04-28 23:49:11 +05302149 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002150 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302151 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002152 return NULL;
2153 }
2154 }
Abhishek Singh217d9782017-04-28 23:49:11 +05302155 if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
2156 &temp_peer->mac_addr,
Alok Kumare1977442018-11-28 17:16:03 +05302157 &vdev->last_peer_mac_addr) &&
2158 (check_valid == 0 ||
2159 temp_peer->valid)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302160 ol_txrx_info_high(
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002161 "vdev_id %d ("QDF_MAC_ADDR_STR") old peer exists.\n",
Abhishek Singh217d9782017-04-28 23:49:11 +05302162 vdev->vdev_id,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002163 QDF_MAC_ADDR_ARRAY(vdev->last_peer_mac_addr.raw));
Abhishek Singh217d9782017-04-28 23:49:11 +05302164 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2165 vdev->wait_on_peer_id = temp_peer->local_id;
2166 qdf_event_reset(&vdev->wait_delete_comp);
2167 wait_on_deletion = true;
2168 break;
2169 } else {
2170 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2171 ol_txrx_err("peer not found");
2172 return NULL;
2173 }
2174 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002175 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302176 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002177
Abhishek Singh217d9782017-04-28 23:49:11 +05302178 qdf_mem_zero(&vdev->last_peer_mac_addr,
2179 sizeof(union ol_txrx_align_mac_addr_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002180 if (wait_on_deletion) {
2181 /* wait for peer deletion */
Nachiket Kukade0396b732017-11-14 16:35:16 +05302182 rc = qdf_wait_for_event_completion(&vdev->wait_delete_comp,
Prakash Manjunathappad3ccca22016-05-05 19:23:19 -07002183 PEER_DELETION_TIMEOUT);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002184 if (QDF_STATUS_SUCCESS != rc) {
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002185 ol_txrx_err("error waiting for peer_id(%d) deletion, status %d\n",
Dustin Brown100201e2017-07-10 11:48:40 -07002186 vdev->wait_on_peer_id, (int) rc);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002187 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08002188 ol_txrx_dump_peer_access_list(temp_peer);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002189 wlan_roam_debug_dump_table();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002190 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Dustin Brown100201e2017-07-10 11:48:40 -07002191
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002192 return NULL;
2193 }
2194 }
2195
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302196 peer = qdf_mem_malloc(sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002197 if (!peer)
2198 return NULL; /* failure */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002199
2200 /* store provided params */
2201 peer->vdev = vdev;
Sravan Kumar Kairamc273afd2018-05-28 12:12:28 +05302202 peer->ctrl_peer = ctrl_peer;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302203 qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
Srinivas Girigowdaa47b45f2019-02-27 12:29:02 -08002204 QDF_MAC_ADDR_SIZE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002205
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302206 ol_txrx_peer_txqs_init(pdev, peer);
2207
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002208 INIT_LIST_HEAD(&peer->bufq_info.cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302209 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002210 /* add this peer into the vdev's list */
2211 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302212 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002213 /* check whether this is a real peer (peer mac addr != vdev mac addr) */
Frank Liu4362e462018-01-16 11:51:55 +08002214 if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr)) {
2215 qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002216 vdev->last_real_peer = peer;
Frank Liu4362e462018-01-16 11:51:55 +08002217 qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
2218 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002219
2220 peer->rx_opt_proc = pdev->rx_opt_proc;
2221
2222 ol_rx_peer_init(pdev, peer);
2223
2224 /* initialize the peer_id */
2225 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2226 peer->peer_ids[i] = HTT_INVALID_PEER;
2227
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302228 qdf_spinlock_create(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002229 qdf_spinlock_create(&peer->bufq_info.bufq_lock);
2230
2231 peer->bufq_info.thresh = OL_TXRX_CACHED_BUFQ_THRESH;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002232
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302233 qdf_atomic_init(&peer->delete_in_progress);
2234 qdf_atomic_init(&peer->flush_in_progress);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302235 qdf_atomic_init(&peer->ref_cnt);
Mohit Khannab7bec722017-11-10 11:43:44 -08002236
2237 for (i = 0; i < PEER_DEBUG_ID_MAX; i++)
2238 qdf_atomic_init(&peer->access_list[i]);
2239
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002240 /* keep one reference for attach */
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002241 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002242
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002243 /* Set a flag to indicate peer create is pending in firmware */
Prakash Dhavali0d3f1d62016-11-20 23:48:24 -08002244 qdf_atomic_init(&peer->fw_create_pending);
2245 qdf_atomic_set(&peer->fw_create_pending, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002246
2247 peer->valid = 1;
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07002248 qdf_timer_init(pdev->osdev, &peer->peer_unmap_timer,
2249 peer_unmap_timer_handler, peer, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002250
2251 ol_txrx_peer_find_hash_add(pdev, peer);
2252
Mohit Khanna47384bc2016-08-15 15:37:05 -07002253 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002254 "vdev %pK created peer %pK ref_cnt %d ("QDF_MAC_ADDR_STR")\n",
Mohit Khanna47384bc2016-08-15 15:37:05 -07002255 vdev, peer, qdf_atomic_read(&peer->ref_cnt),
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002256 QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002257 /*
2258 * For every peer MAp message search and set if bss_peer
2259 */
Ankit Guptaa5076012016-09-14 11:32:19 -07002260 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
Srinivas Girigowdaa47b45f2019-02-27 12:29:02 -08002261 QDF_MAC_ADDR_SIZE))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002262 peer->bss_peer = 1;
2263
2264 /*
2265 * The peer starts in the "disc" state while association is in progress.
2266 * Once association completes, the peer will get updated to "auth" state
2267 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2268 * or else to the "conn" state. For non-open mode, the peer will
2269 * progress to "auth" state once the authentication completes.
2270 */
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002271 peer->state = OL_TXRX_PEER_STATE_INVALID;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002272 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002273 OL_TXRX_PEER_STATE_DISC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002274
2275#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2276 peer->rssi_dbm = HTT_RSSI_INVALID;
2277#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002278 if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2279 !pdev->self_peer) {
2280 pdev->self_peer = peer;
2281 /*
2282 * No Tx in monitor mode, otherwise results in target assert.
2283 * Setting disable_intrabss_fwd to true
2284 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002285 ol_vdev_rx_set_intrabss_fwd((struct cdp_vdev *)vdev, true);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002286 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002287
2288 ol_txrx_local_peer_id_alloc(pdev, peer);
2289
Leo Chang98726762016-10-28 11:07:18 -07002290 return (void *)peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002291}
2292
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302293#undef PEER_DEL_TIMEOUT
2294
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002295/*
2296 * Discarding tx filter - removes all data frames (disconnected state)
2297 */
2298static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2299{
2300 return A_ERROR;
2301}
2302
2303/*
2304 * Non-authentication tx filter - filters out data frames that are not
2305 * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2306 * data frames (connected state)
2307 */
2308static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2309{
2310 return
2311 (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2312 tx_msdu_info->htt.info.ethertype ==
2313 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2314}
2315
2316/*
2317 * Pass-through tx filter - lets all data frames through (authenticated state)
2318 */
2319static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2320{
2321 return A_OK;
2322}
2323
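/*
 * Summary of the three tx filters above, keyed by the peer state noted
 * in each filter's comment:
 *   disconnected  -> ol_tx_filter_discard   (drop all data frames)
 *   connected     -> ol_tx_filter_non_auth  (allow only EAPOL/WAPI)
 *   authenticated -> ol_tx_filter_pass_thru (allow all data frames)
 */
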
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002324/**
2325 * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2326 * @peer: handle to peer
2327 *
2328 * Returns the MAC address, for modules which do not know the peer type.
2329 *
2330 * Return: the mac_addr from peer
2331 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002332static uint8_t *
Leo Chang98726762016-10-28 11:07:18 -07002333ol_txrx_peer_get_peer_mac_addr(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002334{
Leo Chang98726762016-10-28 11:07:18 -07002335 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002336
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002337 if (!peer)
2338 return NULL;
2339
2340 return peer->mac_addr.raw;
2341}
2342
Abhishek Singhcfb44482017-03-10 12:42:37 +05302343#ifdef WLAN_FEATURE_11W
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002344/**
2345 * ol_txrx_get_pn_info() - Returns pn info from peer
2346 * @peer: handle to peer
2347 * @last_pn_valid: return last_rmf_pn_valid value from peer.
2348 * @last_pn: return last_rmf_pn value from peer.
2349 * @rmf_pn_replays: return rmf_pn_replays value from peer.
2350 *
2351 * Return: NONE
2352 */
2353void
Leo Chang98726762016-10-28 11:07:18 -07002354ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002355 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2356{
Leo Chang98726762016-10-28 11:07:18 -07002357 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002358 *last_pn_valid = &peer->last_rmf_pn_valid;
2359 *last_pn = &peer->last_rmf_pn;
2360 *rmf_pn_replays = &peer->rmf_pn_replays;
2361}
Abhishek Singhcfb44482017-03-10 12:42:37 +05302362#else
2363void
2364ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
2365 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2366{
2367}
2368#endif
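
/*
 * Illustrative sketch (not part of the driver), assuming a WLAN_FEATURE_11W
 * build: the returned pointers alias fields inside the peer object, so a
 * caller such as a PMF replay check can read them in place.
 * example_read_rmf_replays() and the guard are hypothetical names; note that
 * the non-11W stub above leaves the pointers unset, so they must not be
 * dereferenced in that configuration.
 */
#ifdef OL_TXRX_PN_INFO_EXAMPLE
static uint32_t example_read_rmf_replays(void *ppeer)
{
	uint8_t *last_pn_valid;
	uint64_t *last_pn;
	uint32_t *rmf_pn_replays;

	ol_txrx_get_pn_info(ppeer, &last_pn_valid, &last_pn, &rmf_pn_replays);

	/* the replay count is only meaningful once a PN has been recorded */
	return *last_pn_valid ? *rmf_pn_replays : 0;
}
#endif /* OL_TXRX_PN_INFO_EXAMPLE */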

/**
 * ol_txrx_get_opmode() - Return operation mode of vdev
 * @vdev: vdev handle
 *
 * Return: operation mode.
 */
static int ol_txrx_get_opmode(struct cdp_vdev *pvdev)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	return vdev->opmode;
}

/**
 * ol_txrx_get_peer_state() - Return peer state of peer
 * @peer: peer handle
 *
 * Return: return peer state
 */
static int ol_txrx_get_peer_state(void *ppeer)
{
	ol_txrx_peer_handle peer = ppeer;

	return peer->state;
}

/**
 * ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
 * @peer: peer handle
 *
 * Return: vdev handle from peer
 */
static struct cdp_vdev *ol_txrx_get_vdev_for_peer(void *ppeer)
{
	ol_txrx_peer_handle peer = ppeer;

	return (struct cdp_vdev *)peer->vdev;
}

/**
 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
 * @vdev: vdev handle
 *
 * Return: vdev mac address
 */
static uint8_t *
ol_txrx_get_vdev_mac_addr(struct cdp_vdev *pvdev)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	if (!vdev)
		return NULL;

	return vdev->mac_addr.raw;
}

#ifdef currently_unused
/**
 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
 * vdev
 * @vdev: vdev handle
 *
 * Return: Handle to struct qdf_mac_addr
 */
struct qdf_mac_addr *
ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
{
	return (struct qdf_mac_addr *)&(vdev->mac_addr);
}
#endif

#ifdef currently_unused
/**
 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
 * @vdev: vdev handle
 *
 * Return: Handle to pdev
 */
ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
{
	return vdev->pdev;
}
#endif

/**
 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
 * @vdev: vdev handle
 *
 * Return: Handle to control pdev
 */
static struct cdp_cfg *
ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_vdev *pvdev)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	return vdev->pdev->ctrl_pdev;
}

/**
 * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
 * @vdev: vdev handle
 *
 * Return: Rx Fwd disabled status
 */
static uint8_t
ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
					vdev->pdev->ctrl_pdev;

	return cfg->rx_fwd_disabled;
}

#ifdef QCA_IBSS_SUPPORT
/**
 * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
 * @vdev: vdev handle
 * @peer_num_delta: peer num adjustment to apply
 *
 * Return: -1 (OL_TXRX_INVALID_NUM_PEERS) for failure, or the total peer
 *	   num after adjustment.
 */
static int16_t
ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
					 int16_t peer_num_delta)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	int16_t new_peer_num;

	new_peer_num = vdev->ibss_peer_num + peer_num_delta;
	if (new_peer_num > MAX_PEERS || new_peer_num < 0)
		return OL_TXRX_INVALID_NUM_PEERS;

	vdev->ibss_peer_num = new_peer_num;

	return new_peer_num;
}

/**
 * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
 * beat timer
 * @vdev: vdev handle
 * @timer_value_sec: new heart beat timer value
 *
 * Return: Old timer value set in vdev.
 */
static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
						       uint16_t timer_value_sec)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;

	vdev->ibss_peer_heart_beat_timer = timer_value_sec;

	return old_timer_value;
}
#else /* !QCA_IBSS_SUPPORT */
static inline int16_t
ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
					 int16_t peer_num_delta)
{
	return 0;
}

static inline uint16_t
ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
				       uint16_t timer_value_sec)
{
	return 0;
}
#endif /* QCA_IBSS_SUPPORT */
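
/*
 * Illustrative sketch (not part of the driver): how an IBSS caller might
 * account for a joining peer and detect overflow against MAX_PEERS.
 * example_on_ibss_join() and the guard are hypothetical names.
 */
#ifdef OL_TXRX_IBSS_EXAMPLE
static int example_on_ibss_join(struct cdp_vdev *pvdev)
{
	int16_t num = ol_txrx_update_ibss_add_peer_num_of_vdev(pvdev, 1);

	if (num == OL_TXRX_INVALID_NUM_PEERS)
		return -EINVAL;	/* would exceed MAX_PEERS or go negative */

	return 0;
}
#endif /* OL_TXRX_IBSS_EXAMPLE */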

/**
 * ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
 * @vdev: vdev handle
 * @callback: callback function to remove the peer.
 * @callback_context: handle for callback function
 * @remove_last_peer: whether the last (bss) peer should also be removed
 *
 * Return: NONE
 */
static void
ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
			      ol_txrx_vdev_peer_remove_cb callback,
			      void *callback_context, bool remove_last_peer)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	ol_txrx_peer_handle peer, temp;
	int self_removed = 0;

	/* remove all remote peers for vdev */
	qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);

	temp = NULL;
	TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
			      peer_list_elem) {
		if (qdf_atomic_read(&peer->delete_in_progress))
			continue;
		if (temp) {
			qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
			callback(callback_context, temp->mac_addr.raw,
				 vdev->vdev_id, temp);
			qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
		}
		/* self peer is deleted last */
		if (peer == TAILQ_FIRST(&vdev->peer_list)) {
			self_removed = 1;
			break;
		}
		temp = peer;
	}

	qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);

	if (self_removed)
		ol_txrx_info("self peer removed by caller");

	if (remove_last_peer) {
		/* remove IBSS bss peer last */
		peer = TAILQ_FIRST(&vdev->peer_list);
		callback(callback_context, (uint8_t *)&vdev->mac_addr,
			 vdev->vdev_id, peer);
	}
}
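
/*
 * Illustrative sketch (not part of the driver): the shape of a peer-remove
 * callback a control-path caller could pass in, mirroring the call sites
 * above; the authoritative parameter types are those of the
 * ol_txrx_vdev_peer_remove_cb typedef.  example_peer_remove_cb() is a
 * hypothetical name; real callers (e.g. in WMA) issue a peer delete to the
 * target here.
 */
#ifdef OL_TXRX_PEER_REMOVE_EXAMPLE
static void example_peer_remove_cb(void *cb_ctx, uint8_t *peer_mac,
				   uint8_t vdev_id, void *peer)
{
	ol_txrx_info("removing peer "QDF_MAC_ADDR_STR" on vdev %d",
		     QDF_MAC_ADDR_ARRAY(peer_mac), vdev_id);
}
#endif /* OL_TXRX_PEER_REMOVE_EXAMPLE */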

/**
 * ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
 * @vdev: vdev handle
 * @callback: callback function to remove the peer.
 * @callback_context: handle for callback function
 *
 * Return: NONE
 */
static void
ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
				      ol_txrx_vdev_peer_remove_cb callback,
				      void *callback_context)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	ol_txrx_peer_handle peer = NULL;
	ol_txrx_peer_handle tmp_peer = NULL;

	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, peer_list_elem, tmp_peer) {
		ol_txrx_info_high(
			"peer found for vdev id %d. deleting the peer",
			vdev->vdev_id);
		callback(callback_context, (uint8_t *)&vdev->mac_addr,
			 vdev->vdev_id, peer);
	}
}

#ifdef WLAN_FEATURE_DSRC
/**
 * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
 * @vdev: vdev handle
 * @ocb_set_chan: OCB channel information to be set in vdev.
 *
 * Return: NONE
 */
static void ol_txrx_set_ocb_chan_info(struct cdp_vdev *pvdev,
				      struct ol_txrx_ocb_set_chan ocb_set_chan)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
	vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
}

/**
 * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
 * @vdev: vdev handle
 *
 * Return: handle to struct ol_txrx_ocb_chan_info
 */
static struct ol_txrx_ocb_chan_info *
ol_txrx_get_ocb_chan_info(struct cdp_vdev *pvdev)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	return vdev->ocb_channel_info;
}
#endif

/**
 * @brief specify the peer's authentication state
 * @details
 *  Specify the peer's authentication state (none, connected, authenticated)
 *  to allow the data SW to determine whether to filter out invalid data
 *  frames.  (In the "connected" state, where security is enabled but
 *  authentication has not yet completed, tx and rx data frames other than
 *  EAPOL or WAPI should be discarded.)
 *  This function is only relevant for systems in which the tx and rx
 *  filtering are done in the host rather than in the target.
 *
 * @param ppdev - data physical device the peer belongs to
 * @param peer_mac - MAC address of the peer whose state has changed
 * @param state - the new state of the peer
 *
 * Return: QDF Status
 */
QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
				     uint8_t *peer_mac,
				     enum ol_txrx_peer_state state)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_peer_t *peer;
	int peer_ref_cnt;

	if (qdf_unlikely(!pdev)) {
		ol_txrx_err("Pdev is NULL");
		qdf_assert(0);
		return QDF_STATUS_E_INVAL;
	}

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
						   PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer) {
		ol_txrx_err(
			"peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
			peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
			peer_mac[4], peer_mac[5]);
		return QDF_STATUS_E_INVAL;
	}

	/* TODO: Should we send WMI command of the connection state? */
	/* avoid multiple auth state change. */
	if (peer->state == state) {
#ifdef TXRX_PRINT_VERBOSE_ENABLE
		ol_txrx_dbg("no state change, returns directly");
#endif
		peer_ref_cnt = ol_txrx_peer_release_ref(
					peer, PEER_DEBUG_ID_OL_INTERNAL);
		return QDF_STATUS_SUCCESS;
	}

	ol_txrx_dbg("change from %d to %d",
		    peer->state, state);

	peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
		? ol_tx_filter_pass_thru
		: ((state == OL_TXRX_PEER_STATE_CONN)
		   ? ol_tx_filter_non_auth
		   : ol_tx_filter_discard);

	if (peer->vdev->pdev->cfg.host_addba) {
		if (state == OL_TXRX_PEER_STATE_AUTH) {
			int tid;
			/*
			 * Pause all regular (non-extended) TID tx queues until
			 * data arrives and ADDBA negotiation has completed.
			 */
			ol_txrx_dbg("pause peer and unpause mgmt/non-qos");
			ol_txrx_peer_pause(peer); /* pause all tx queues */
			/* unpause mgmt and non-QoS tx queues */
			for (tid = OL_TX_NUM_QOS_TIDS;
			     tid < OL_TX_NUM_TIDS; tid++)
				ol_txrx_peer_tid_unpause(peer, tid);
		}
	}
	peer_ref_cnt = ol_txrx_peer_release_ref(peer,
						PEER_DEBUG_ID_OL_INTERNAL);
	/*
	 * After ol_txrx_peer_release_ref, the peer object cannot be accessed
	 * if the return code was 0.
	 */
	if (peer_ref_cnt > 0)
		/*
		 * Set the state after the pause to avoid the race condition
		 * with the ADDBA check in the tx path.
		 */
		peer->state = state;
	return QDF_STATUS_SUCCESS;
}
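
/*
 * Illustrative sketch (not part of the driver): the state progression a
 * control-path caller would typically drive for a secure, host-filtered
 * association.  example_on_assoc_done()/example_on_auth_done() and the
 * guard are hypothetical names.
 */
#ifdef OL_TXRX_STATE_EXAMPLE
static void example_on_assoc_done(struct cdp_pdev *ppdev, uint8_t *peer_mac)
{
	/* security enabled: only EAPOL/WAPI frames pass from here on */
	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_CONN);
}

static void example_on_auth_done(struct cdp_pdev *ppdev, uint8_t *peer_mac)
{
	/* authentication finished: let all data frames through */
	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_AUTH);
}
#endif /* OL_TXRX_STATE_EXAMPLE */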

void
ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
{
	peer->keyinstalled = val;
}

void
ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
		    uint8_t *peer_mac,
		    union ol_txrx_peer_update_param_t *param,
		    enum ol_txrx_peer_update_select_t select)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_hash_find_get_ref(vdev->pdev, peer_mac, 0, 1,
						   PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer) {
		ol_txrx_dbg("peer is null");
		return;
	}

	switch (select) {
	case ol_txrx_peer_update_qos_capable:
	{
		/*
		 * Save qos_capable in the txrx peer here; it is saved
		 * again when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO arrives.
		 */
		peer->qos_capable = param->qos_capable;
		/*
		 * The following function call assumes that the peer has a
		 * single ID. This is currently true, and
		 * is expected to remain true.
		 */
		htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
				    peer->peer_ids[0],
				    peer->qos_capable);
		break;
	}
	case ol_txrx_peer_update_uapsdMask:
	{
		peer->uapsd_mask = param->uapsd_mask;
		htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
					  peer->peer_ids[0],
					  peer->uapsd_mask);
		break;
	}
	case ol_txrx_peer_update_peer_security:
	{
		enum ol_sec_type sec_type = param->sec_type;
		enum htt_sec_type peer_sec_type = htt_sec_type_none;

		switch (sec_type) {
		case ol_sec_type_none:
			peer_sec_type = htt_sec_type_none;
			break;
		case ol_sec_type_wep128:
			peer_sec_type = htt_sec_type_wep128;
			break;
		case ol_sec_type_wep104:
			peer_sec_type = htt_sec_type_wep104;
			break;
		case ol_sec_type_wep40:
			peer_sec_type = htt_sec_type_wep40;
			break;
		case ol_sec_type_tkip:
			peer_sec_type = htt_sec_type_tkip;
			break;
		case ol_sec_type_tkip_nomic:
			peer_sec_type = htt_sec_type_tkip_nomic;
			break;
		case ol_sec_type_aes_ccmp:
			peer_sec_type = htt_sec_type_aes_ccmp;
			break;
		case ol_sec_type_wapi:
			peer_sec_type = htt_sec_type_wapi;
			break;
		default:
			peer_sec_type = htt_sec_type_none;
			break;
		}

		peer->security[txrx_sec_ucast].sec_type =
			peer->security[txrx_sec_mcast].sec_type =
							peer_sec_type;

		break;
	}
	default:
	{
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "ERROR: unknown param %d in %s", select,
			  __func__);
		break;
	}
	} /* switch */
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
}
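
/*
 * Illustrative sketch (not part of the driver): setting a peer's cipher
 * through the update union shown above.  example_set_peer_ccmp() and the
 * guard are hypothetical names.
 */
#ifdef OL_TXRX_PEER_UPDATE_EXAMPLE
static void example_set_peer_ccmp(ol_txrx_vdev_handle vdev, uint8_t *peer_mac)
{
	union ol_txrx_peer_update_param_t param;

	/* only the union member matching the selector needs to be filled */
	param.sec_type = ol_sec_type_aes_ccmp;
	ol_txrx_peer_update(vdev, peer_mac, &param,
			    ol_txrx_peer_update_peer_security);
}
#endif /* OL_TXRX_PEER_UPDATE_EXAMPLE */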

uint8_t
ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
	if (peer)
		return peer->uapsd_mask;

	return 0;
}

uint8_t
ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
{
	struct ol_txrx_peer_t *peer_t =
		ol_txrx_peer_find_by_id(txrx_pdev, peer_id);

	if (peer_t)
		return peer_t->qos_capable;

	return 0;
}

/**
 * ol_txrx_peer_free_tids() - free tids for the peer
 * @peer: peer handle
 *
 * Return: None
 */
static inline void ol_txrx_peer_free_tids(ol_txrx_peer_handle peer)
{
	int i = 0;
	/*
	 * 'array' is allocated in addba handler and is supposed to be
	 * freed in delba handler. There is the case (for example, in
	 * SSR) where delba handler is not called. Because array points
	 * to address of 'base' by default and is reallocated in addba
	 * handler later, only free the memory when the array does not
	 * point to base.
	 */
	for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
		if (peer->tids_rx_reorder[i].array !=
		    &peer->tids_rx_reorder[i].base) {
			ol_txrx_dbg("delete reorder arr, tid:%d", i);
			qdf_mem_free(peer->tids_rx_reorder[i].array);
			ol_rx_reorder_init(&peer->tids_rx_reorder[i],
					   (uint8_t)i);
		}
	}
}

/**
 * ol_txrx_peer_release_ref() - release peer reference
 * @peer: peer handle
 *
 * Release peer reference and delete peer if refcount is 0
 *
 * Return: Resulting peer ref_cnt after this function is invoked
 */
int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
			     enum peer_debug_id_type debug_id)
{
	int rc;
	struct ol_txrx_vdev_t *vdev;
	struct ol_txrx_pdev_t *pdev;
	bool ref_silent = true;
	int access_list = 0;
	uint32_t err_code = 0;

	/* preconditions */
	TXRX_ASSERT2(peer);

	vdev = peer->vdev;
	if (!vdev) {
		ol_txrx_err("The vdev is not present anymore\n");
		return -EINVAL;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		ol_txrx_err("The pdev is not present anymore\n");
		err_code = 0xbad2;
		goto ERR_STATE;
	}

	if (debug_id >= PEER_DEBUG_ID_MAX || debug_id < 0) {
		ol_txrx_err("incorrect debug_id %d ", debug_id);
		err_code = 0xbad3;
		goto ERR_STATE;
	}

	if (debug_id == PEER_DEBUG_ID_OL_RX_THREAD)
		ref_silent = true;

	if (!ref_silent)
		wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
				    DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
				    peer, 0xdead,
				    qdf_atomic_read(&peer->ref_cnt));

	/*
	 * Hold the lock all the way from checking if the peer ref count
	 * is zero until the peer references are removed from the hash
	 * table and vdev list (if the peer ref count is zero).
	 * This protects against a new HL tx operation starting to use the
	 * peer object just after this function concludes it's done being used.
	 * Furthermore, the lock needs to be held while checking whether the
	 * vdev's list of peers is empty, to make sure that list is not modified
	 * concurrently with the empty check.
	 */
	qdf_spin_lock_bh(&pdev->peer_ref_mutex);

	/*
	 * Check for the reference count before deleting the peer
	 * as we noticed that sometimes we are re-entering this
	 * function again which is leading to dead-lock.
	 * (A double-free should never happen, so assert if it does.)
	 */
	rc = qdf_atomic_read(&(peer->ref_cnt));

	if (rc == 0) {
		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
		ol_txrx_err("The Peer is not present anymore\n");
		qdf_assert(0);
		return -EACCES;
	}
	/*
	 * now decrement rc; this will be the return code.
	 * 0 : peer deleted
	 * >0: peer ref removed, but still has other references
	 * <0: sanity failed - no changes to the state of the peer
	 */
	rc--;

	if (!qdf_atomic_read(&peer->access_list[debug_id])) {
		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
		ol_txrx_err("peer %pK ref was not taken by %d",
			    peer, debug_id);
		ol_txrx_dump_peer_access_list(peer);
		QDF_BUG(0);
		return -EACCES;
	}
	qdf_atomic_dec(&peer->access_list[debug_id]);

	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
		u16 peer_id;

		wlan_roam_debug_log(vdev->vdev_id,
				    DEBUG_DELETING_PEER_OBJ,
				    DEBUG_INVALID_PEER_ID,
				    &peer->mac_addr.raw, peer, 0,
				    qdf_atomic_read(&peer->ref_cnt));
		peer_id = peer->local_id;
		/* remove the reference to the peer from the hash table */
		ol_txrx_peer_find_hash_remove(pdev, peer);

		/* remove the peer from its parent vdev's list */
		TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);

		/* cleanup the Rx reorder queues for this peer */
		ol_rx_peer_cleanup(vdev, peer);

		qdf_spinlock_destroy(&peer->peer_info_lock);
		qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);

		/* peer is removed from peer_list */
		qdf_atomic_set(&peer->delete_in_progress, 0);

		/*
		 * Set wait_delete_comp event if the current peer id matches
		 * with registered peer id.
		 */
		if (peer_id == vdev->wait_on_peer_id) {
			qdf_event_set(&vdev->wait_delete_comp);
			vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
		}

		qdf_timer_sync_cancel(&peer->peer_unmap_timer);
		qdf_timer_free(&peer->peer_unmap_timer);

		/* check whether the parent vdev has no peers left */
		if (TAILQ_EMPTY(&vdev->peer_list)) {
			/*
			 * Check if the parent vdev was waiting for its peers
			 * to be deleted, in order for it to be deleted too.
			 */
			if (vdev->delete.pending) {
				ol_txrx_vdev_delete_cb vdev_delete_cb =
					vdev->delete.callback;
				void *vdev_delete_context =
					vdev->delete.context;
				/*
				 * Now that there are no references to the peer,
				 * we can release the peer reference lock.
				 */
				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);

				/*
				 * The ol_tx_desc_free might access the invalid
				 * content of vdev referred by tx desc, since
				 * this vdev might be detached in another thread
				 * asynchronous.
				 *
				 * Go through tx desc pool to set corresponding
				 * tx desc's vdev to NULL when detach this vdev,
				 * and add vdev checking in the ol_tx_desc_free
				 * to avoid crash.
				 */
				ol_txrx_tx_desc_reset_vdev(vdev);
				ol_txrx_dbg(
					"deleting vdev object %pK ("QDF_MAC_ADDR_STR") - its last peer is done",
					vdev,
					QDF_MAC_ADDR_ARRAY(vdev->mac_addr.raw));
				/* all peers are gone, go ahead and delete it */
				qdf_mem_free(vdev);
				if (vdev_delete_cb)
					vdev_delete_cb(vdev_delete_context);
			} else {
				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
			}
		} else {
			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
		}

		ol_txrx_info_high("[%d][%d]: Deleting peer %pK ref_cnt -> %d %s",
				  debug_id,
				  qdf_atomic_read(&peer->access_list[debug_id]),
				  peer, rc,
				  qdf_atomic_read(&peer->fw_create_pending)
				  == 1 ?
				  "(No Maps received)" : "");

		ol_txrx_peer_tx_queue_free(pdev, peer);

		/* Remove mappings from peer_id to peer object */
		ol_txrx_peer_clear_map_peer(pdev, peer);

		/* Remove peer pointer from local peer ID map */
		ol_txrx_local_peer_id_free(pdev, peer);

		ol_txrx_peer_free_tids(peer);

		ol_txrx_dump_peer_access_list(peer);

		if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam() &&
		    pdev->self_peer == peer)
			pdev->self_peer = NULL;

		qdf_mem_free(peer);
	} else {
		access_list = qdf_atomic_read(&peer->access_list[debug_id]);
		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
		if (!ref_silent)
			ol_txrx_info_high("[%d][%d]: ref delete peer %pK ref_cnt -> %d",
					  debug_id,
					  access_list,
					  peer, rc);
	}
	return rc;

ERR_STATE:
	wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
			    DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
			    peer, err_code, qdf_atomic_read(&peer->ref_cnt));
	return -EINVAL;
}
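
/*
 * Illustrative sketch (not part of the driver): every get-ref must be paired
 * with a release under the same debug id, which is how the per-id
 * access_list accounting above stays balanced.  example_with_peer() and the
 * guard are hypothetical names.
 */
#ifdef OL_TXRX_REF_EXAMPLE
static void example_with_peer(struct ol_txrx_pdev_t *pdev, uint8_t *peer_mac)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
						   PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer)
		return;

	/* ... use the peer; it cannot be freed while the ref is held ... */

	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
}
#endif /* OL_TXRX_REF_EXAMPLE */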

/**
 * ol_txrx_clear_peer_internal() - ol internal function to clear peer
 * @peer: pointer to ol txrx peer structure
 *
 * Return: QDF Status
 */
static QDF_STATUS
ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
{
	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();

	/* Drop pending Rx frames in CDS */
	if (sched_ctx)
		cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);

	/* Purge the cached rx frame queue */
	ol_txrx_flush_rx_frames(peer, 1);

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * ol_txrx_clear_peer() - clear peer
 * @sta_id: sta id
 *
 * Return: QDF Status
 */
static QDF_STATUS ol_txrx_clear_peer(struct cdp_pdev *ppdev, uint8_t sta_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	QDF_STATUS status;

	if (!pdev) {
		ol_txrx_err("Unable to find pdev!");
		return QDF_STATUS_E_FAILURE;
	}

	if (sta_id >= WLAN_MAX_STA_COUNT) {
		ol_txrx_err("Invalid sta id %d", sta_id);
		return QDF_STATUS_E_INVAL;
	}

	peer = ol_txrx_peer_get_ref_by_local_id(ppdev, sta_id,
						PEER_DEBUG_ID_OL_INTERNAL);

	/* Return success, if the peer is already cleared by
	 * data path via peer detach function.
	 */
	if (!peer)
		return QDF_STATUS_SUCCESS;

	ol_txrx_dbg("Clear peer rx frames: " QDF_MAC_ADDR_STR,
		    QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw));
	ol_txrx_clear_peer_internal(peer);
	status = ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);

	return status;
}

void peer_unmap_timer_work_function(void *param)
{
	WMA_LOGI("Enter: %s", __func__);
	/* Added for debugging only */
	ol_txrx_dump_peer_access_list(param);
	ol_txrx_peer_release_ref(param, PEER_DEBUG_ID_OL_UNMAP_TIMER_WORK);
	wlan_roam_debug_dump_table();
	cds_trigger_recovery(QDF_PEER_UNMAP_TIMEDOUT);
}

/**
 * peer_unmap_timer_handler() - peer unmap timer function
 * @data: peer object pointer
 *
 * Return: none
 */
void peer_unmap_timer_handler(void *data)
{
	ol_txrx_peer_handle peer = (ol_txrx_peer_handle)data;
	ol_txrx_pdev_handle txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	ol_txrx_err("all unmap events not received for peer %pK, ref_cnt %d",
		    peer, qdf_atomic_read(&peer->ref_cnt));
	ol_txrx_err("peer %pK ("QDF_MAC_ADDR_STR")",
		    peer,
		    QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw));
	if (!cds_is_driver_recovering() && !cds_is_fw_down()) {
		qdf_create_work(0, &txrx_pdev->peer_unmap_timer_work,
				peer_unmap_timer_work_function,
				peer);
		/* Make sure peer is present before scheduling work */
		ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_UNMAP_TIMER_WORK);
		qdf_sched_work(0, &txrx_pdev->peer_unmap_timer_work);
	} else {
		ol_txrx_err("Recovery is in progress, ignore!");
	}
}

/**
 * ol_txrx_peer_detach() - Delete a peer's data object.
 * @ppeer - the object to detach
 * @bitmap - bitmap indicating special handling of request.
 *
 * When the host's control SW disassociates a peer, it calls
 * this function to detach and delete the peer. The reference
 * stored in the control peer object to the data peer
 * object (set up by a call to ol_peer_store()) is provided.
 *
 * Return: None
 */
static void ol_txrx_peer_detach(void *ppeer, uint32_t bitmap)
{
	ol_txrx_peer_handle peer = ppeer;
	struct ol_txrx_vdev_t *vdev = peer->vdev;

	/* redirect peer's rx delivery function to point to a discard func */
	peer->rx_opt_proc = ol_rx_discard;

	peer->valid = 0;

	/* flush all rx packets before clearing up the peer local_id */
	ol_txrx_clear_peer_internal(peer);

	/* debug print to dump rx reorder state */
	/* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s:peer %pK ("QDF_MAC_ADDR_STR")",
		  __func__, peer,
		  QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw));

	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
	if (vdev->last_real_peer == peer)
		vdev->last_real_peer = NULL;
	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
	htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);

	/*
	 * set delete_in_progress to identify that wma
	 * is waiting for an unmap message for this peer
	 */
	qdf_atomic_set(&peer->delete_in_progress, 1);

	if (!(bitmap & (1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER))) {
		if (vdev->opmode == wlan_op_mode_sta) {
			qdf_mem_copy(&peer->vdev->last_peer_mac_addr,
				     &peer->mac_addr,
				     sizeof(union ol_txrx_align_mac_addr_t));

			/*
			 * Create a timer to track unmap events when the
			 * sta peer gets deleted.
			 */
			qdf_timer_start(&peer->peer_unmap_timer,
					OL_TXRX_PEER_UNMAP_TIMEOUT);
			ol_txrx_info_high
				("started peer_unmap_timer for peer %pK",
				 peer);
		}
	}

	/*
	 * Remove the reference added during peer_attach.
	 * The peer will still be left allocated until the
	 * PEER_UNMAP message arrives to remove the other
	 * reference, added by the PEER_MAP message.
	 */
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
}

/**
 * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
 * @ppeer - the object to detach
 *
 * Detach a peer and force the peer object to be removed. It is called during
 * a roaming scenario when the firmware has already deleted the peer.
 * Remove it from the peer_id_to_object map. The peer object is actually freed
 * when the last reference is deleted.
 *
 * Return: None
 */
static void ol_txrx_peer_detach_force_delete(void *ppeer)
{
	ol_txrx_peer_handle peer = ppeer;
	ol_txrx_pdev_handle pdev = peer->vdev->pdev;

	ol_txrx_info_high("peer %pK, peer->ref_cnt %d",
			  peer, qdf_atomic_read(&peer->ref_cnt));

	/* Clear the peer_id_to_obj map entries */
	ol_txrx_peer_remove_obj_map_entries(pdev, peer);
	ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
}

/**
 * ol_txrx_peer_detach_sync() - peer detach sync callback
 * @ppeer - the peer object
 * @peer_unmap_sync - peer unmap sync cb.
 * @bitmap - bitmap indicating special handling of request.
 *
 * Return: None
 */
static void ol_txrx_peer_detach_sync(void *ppeer,
				     ol_txrx_peer_unmap_sync_cb peer_unmap_sync,
				     uint32_t bitmap)
{
	ol_txrx_peer_handle peer = ppeer;
	ol_txrx_pdev_handle pdev = peer->vdev->pdev;

	ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d", __func__,
			  peer, qdf_atomic_read(&peer->ref_cnt));

	if (!pdev->peer_unmap_sync_cb)
		pdev->peer_unmap_sync_cb = peer_unmap_sync;

	ol_txrx_peer_detach(peer, bitmap);
}

/**
 * ol_txrx_peer_unmap_sync_cb_set() - set peer unmap sync callback
 * @ppdev - TXRX pdev context
 * @peer_unmap_sync - peer unmap sync callback
 *
 * Return: None
 */
static void ol_txrx_peer_unmap_sync_cb_set(
				struct cdp_pdev *ppdev,
				ol_txrx_peer_unmap_sync_cb peer_unmap_sync)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if (!pdev->peer_unmap_sync_cb)
		pdev->peer_unmap_sync_cb = peer_unmap_sync;
}

/**
 * ol_txrx_dump_tx_desc() - dump tx desc total and free count
 * @txrx_pdev: Pointer to txrx pdev
 *
 * Return: none
 */
static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
{
	struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle)pdev_handle;
	uint32_t total, num_free;

	if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
		total = qdf_atomic_read(&pdev->orig_target_tx_credit);
	else
		total = ol_tx_get_desc_global_pool_size(pdev);

	num_free = ol_tx_get_total_free_desc(pdev);

	ol_txrx_info_high(
		"total tx credit %d num_free %d",
		total, num_free);
}

/**
 * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
 * @timeout: timeout in ms
 *
 * Wait for the tx queue to empty; return a timeout error if the
 * queue doesn't empty before the timeout occurs.
 *
 * Return:
 *    QDF_STATUS_SUCCESS if the queue empties,
 *    QDF_STATUS_E_TIMEOUT in case of timeout,
 *    QDF_STATUS_E_FAULT in case of missing handle
 */
static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
{
	struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!txrx_pdev) {
		ol_txrx_err("txrx context is null");
		return QDF_STATUS_E_FAULT;
	}

	while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
		qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
		if (timeout <= 0) {
			ol_txrx_err("tx frames are pending");
			ol_txrx_dump_tx_desc(txrx_pdev);
			return QDF_STATUS_E_TIMEOUT;
		}
		timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
	}
	return QDF_STATUS_SUCCESS;
}

#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT 500
#else
#define SUSPEND_DRAIN_WAIT 3000
#endif

#ifdef FEATURE_RUNTIME_PM
/**
 * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
 * @txrx_pdev: TXRX pdev context
 *
 * TXRX is ready to runtime suspend if there are no pending packets
 * in the tx queue.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_pdev *ppdev)
{
	struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)ppdev;

	if (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev))
		return QDF_STATUS_E_BUSY;
	else
		return QDF_STATUS_SUCCESS;
}

/**
 * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
 * @txrx_pdev: TXRX pdev context
 *
 * This is a dummy function for symmetry.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS ol_txrx_runtime_resume(struct cdp_pdev *ppdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif
3441
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003442/**
3443 * ol_txrx_bus_suspend() - bus suspend
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003444 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003445 *
3446 * Ensure that ol_txrx is ready for bus suspend
3447 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303448 * Return: QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003449 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003450static QDF_STATUS ol_txrx_bus_suspend(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003451{
3452 return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3453}
3454
3455/**
3456 * ol_txrx_bus_resume() - bus resume
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003457 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003458 *
3459 * Dummy function for symetry
3460 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303461 * Return: QDF_STATUS_SUCCESS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003462 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003463static QDF_STATUS ol_txrx_bus_resume(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003464{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303465 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003466}
3467
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003468/**
3469 * ol_txrx_get_tx_pending - Get the number of pending transmit
3470 * frames that are awaiting completion.
3471 *
3472 * @pdev - the data physical device object
3473 * Mainly used in clean up path to make sure all buffers have been freed
3474 *
3475 * Return: count of pending frames
3476 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003477int ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003478{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003479 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003480 uint32_t total;
3481
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303482 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3483 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3484 else
3485 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003486
Nirav Shah55b45a02016-01-21 10:00:16 +05303487 return total - ol_tx_get_total_free_desc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003488}
3489
3490void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
3491{
3492 ol_tx_desc_list tx_descs;
Yun Parkeaea8632017-04-09 09:53:45 -07003493 /*
3494 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303495 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Yun Parkeaea8632017-04-09 09:53:45 -07003496 * which is tha same with normal data send complete path
3497 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003498 htt_tx_pending_discard(pdev_handle->htt_pdev);
3499
3500 TAILQ_INIT(&tx_descs);
3501 ol_tx_queue_discard(pdev_handle, true, &tx_descs);
3502 /* Discard Frames in Discard List */
3503 ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
3504
3505 ol_tx_discard_target_frms(pdev_handle);
3506}
3507
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003508static inline
3509uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
3510{
3511 return (uint64_t) ((size_t) req);
3512}
3513
3514static inline
3515struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
3516{
3517 return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
3518}
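
/*
 * Round-trip sketch (illustrative only, guarded out of compilation; the
 * example function is hypothetical): the pair of helpers above lets a
 * host pointer travel through firmware as an opaque 64-bit cookie and
 * come back unchanged.
 */
#ifdef currently_unused
static void ol_txrx_stats_cookie_example(struct ol_txrx_stats_req_internal *req)
{
	uint64_t cookie = ol_txrx_stats_ptr_to_u64(req);

	/* converting back must yield the original pointer */
	qdf_assert(ol_txrx_u64_to_stats_ptr(cookie) == req);
}
#endif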
3519
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003520#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003521void
3522ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
3523 uint8_t cfg_stats_type, uint32_t cfg_val)
3524{
jitiphil335d2412018-06-07 22:49:24 +05303525 uint8_t dummy_cookie = 0;
Yun Parkeaea8632017-04-09 09:53:45 -07003526
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003527 htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
3528 0 /* reset mask */,
3529 cfg_stats_type, cfg_val, dummy_cookie);
3530}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003531#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003532
jitiphil335d2412018-06-07 22:49:24 +05303533/**
3534 * ol_txrx_fw_stats_desc_pool_init() - Initialize the fw stats descriptor pool
3535 * @pdev: handle to ol txrx pdev
3536 * @pool_size: Size of fw stats descriptor pool
3537 *
3538 * Return: 0 for success, error code on failure.
3539 */
3540int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
3541 uint8_t pool_size)
3542{
3543 int i;
3544
3545 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303546 ol_txrx_err("pdev is NULL");
jitiphil335d2412018-06-07 22:49:24 +05303547 return -EINVAL;
3548 }
3549 pdev->ol_txrx_fw_stats_desc_pool.pool = qdf_mem_malloc(pool_size *
3550 sizeof(struct ol_txrx_fw_stats_desc_elem_t));
Nirav Shah7c8c1712018-09-10 16:01:31 +05303551 if (!pdev->ol_txrx_fw_stats_desc_pool.pool)
jitiphil335d2412018-06-07 22:49:24 +05303552 return -ENOMEM;
Nirav Shah7c8c1712018-09-10 16:01:31 +05303553
jitiphil335d2412018-06-07 22:49:24 +05303554 pdev->ol_txrx_fw_stats_desc_pool.freelist =
3555 &pdev->ol_txrx_fw_stats_desc_pool.pool[0];
3556 pdev->ol_txrx_fw_stats_desc_pool.pool_size = pool_size;
3557
3558 for (i = 0; i < (pool_size - 1); i++) {
3559 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
3560 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
3561 pdev->ol_txrx_fw_stats_desc_pool.pool[i].next =
3562 &pdev->ol_txrx_fw_stats_desc_pool.pool[i + 1];
3563 }
3564 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
3565 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
3566 pdev->ol_txrx_fw_stats_desc_pool.pool[i].next = NULL;
3567 qdf_spinlock_create(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3568 qdf_atomic_init(&pdev->ol_txrx_fw_stats_desc_pool.initialized);
3569 qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 1);
3570 return 0;
3571}
3572
3573/**
3574 * ol_txrx_fw_stats_desc_pool_deinit() - Deinitialize the
3575 * fw stats descriptor pool
3576 * @pdev: handle to ol txrx pdev
3577 *
3578 * Return: None
3579 */
3580void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev)
3581{
jitiphil335d2412018-06-07 22:49:24 +05303582 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303583 ol_txrx_err("pdev is NULL");
jitiphil335d2412018-06-07 22:49:24 +05303584 return;
3585 }
3586 if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303587 ol_txrx_err("Pool is not initialized");
jitiphil335d2412018-06-07 22:49:24 +05303588 return;
3589 }
3590 if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303591 ol_txrx_err("Pool is not allocated");
jitiphil335d2412018-06-07 22:49:24 +05303592 return;
3593 }
3594 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3595 qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 0);
jitiphil335d2412018-06-07 22:49:24 +05303596 qdf_mem_free(pdev->ol_txrx_fw_stats_desc_pool.pool);
3597 pdev->ol_txrx_fw_stats_desc_pool.pool = NULL;
3598
3599 pdev->ol_txrx_fw_stats_desc_pool.freelist = NULL;
3600 pdev->ol_txrx_fw_stats_desc_pool.pool_size = 0;
3601 qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3602}
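
/*
 * Usage sketch (hypothetical caller, compiled out via currently_unused):
 * the pool is sized by FW_STATS_DESC_POOL_SIZE at pdev attach and torn
 * down with the deinit routine once no stats responses can be in flight.
 */
#ifdef currently_unused
static int ol_txrx_fw_stats_pool_example(struct ol_txrx_pdev_t *pdev)
{
	int ret = ol_txrx_fw_stats_desc_pool_init(pdev,
						  FW_STATS_DESC_POOL_SIZE);

	if (ret)
		return ret;

	/* ... issue fw stats requests, process responses ... */

	ol_txrx_fw_stats_desc_pool_deinit(pdev);
	return 0;
}
#endif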
3603
3604/**
3605 * ol_txrx_fw_stats_desc_alloc() - Get fw stats descriptor from fw stats
3606 * free descriptor pool
3607 * @pdev: handle to ol txrx pdev
3608 *
3609 * Return: pointer to fw stats descriptor, NULL on failure
3610 */
3611struct ol_txrx_fw_stats_desc_t
3612 *ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev)
3613{
3614 struct ol_txrx_fw_stats_desc_t *desc = NULL;
3615
3616 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3617 if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
3618 qdf_spin_unlock_bh(&pdev->
3619 ol_txrx_fw_stats_desc_pool.pool_lock);
Nirav Shah7c8c1712018-09-10 16:01:31 +05303620 ol_txrx_err("Pool deinitialized");
jitiphil335d2412018-06-07 22:49:24 +05303621 return NULL;
3622 }
3623 if (pdev->ol_txrx_fw_stats_desc_pool.freelist) {
3624 desc = &pdev->ol_txrx_fw_stats_desc_pool.freelist->desc;
3625 pdev->ol_txrx_fw_stats_desc_pool.freelist =
3626 pdev->ol_txrx_fw_stats_desc_pool.freelist->next;
3627 }
3628 qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3629
3630 if (desc)
Nirav Shah7c8c1712018-09-10 16:01:31 +05303631 ol_txrx_dbg("desc_id %d allocated", desc->desc_id);
jitiphil335d2412018-06-07 22:49:24 +05303632 else
Nirav Shah7c8c1712018-09-10 16:01:31 +05303633 ol_txrx_err("fw stats descriptors are exhausted");
jitiphil335d2412018-06-07 22:49:24 +05303634
3635 return desc;
3636}
3637
3638/**
 3639 * ol_txrx_fw_stats_desc_get_req() - get the request stored in a fw stats
 3640 * descriptor and return that descriptor to the free pool
 3641 * @pdev: handle to ol txrx pdev
 3642 * @desc_id: descriptor ID of the fw stats descriptor
 3643 *
 3644 * Return: pointer to the stored request, NULL if the pool is deinitialized
3645 */
3646struct ol_txrx_stats_req_internal
3647 *ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
3648 unsigned char desc_id)
3649{
3650 struct ol_txrx_fw_stats_desc_elem_t *desc_elem;
3651 struct ol_txrx_stats_req_internal *req;
3652
3653 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3654 if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
3655 qdf_spin_unlock_bh(&pdev->
3656 ol_txrx_fw_stats_desc_pool.pool_lock);
Nirav Shah7c8c1712018-09-10 16:01:31 +05303657 ol_txrx_err("Desc ID %u Pool deinitialized", desc_id);
jitiphil335d2412018-06-07 22:49:24 +05303658 return NULL;
3659 }
3660 desc_elem = &pdev->ol_txrx_fw_stats_desc_pool.pool[desc_id];
3661 req = desc_elem->desc.req;
3662 desc_elem->desc.req = NULL;
3663 desc_elem->next =
3664 pdev->ol_txrx_fw_stats_desc_pool.freelist;
3665 pdev->ol_txrx_fw_stats_desc_pool.freelist = desc_elem;
3666 qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3667 return req;
3668}
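
/*
 * Descriptor lifecycle sketch (illustrative, compiled out; the wrapper
 * is hypothetical): a descriptor is allocated when a request is issued,
 * its desc_id is what travels to firmware as the cookie, and the
 * response path trades the cookie back for the original request while
 * the descriptor returns to the freelist.
 */
#ifdef currently_unused
static struct ol_txrx_stats_req_internal *
ol_txrx_fw_stats_desc_lifecycle_example(struct ol_txrx_pdev_t *pdev,
					struct ol_txrx_stats_req_internal *req)
{
	struct ol_txrx_fw_stats_desc_t *desc;

	desc = ol_txrx_fw_stats_desc_alloc(pdev);
	if (!desc)
		return NULL;

	desc->req = req;
	/* desc->desc_id would be sent to firmware as the 8-bit cookie */
	return ol_txrx_fw_stats_desc_get_req(pdev, desc->desc_id);
}
#endif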
3669
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003670static A_STATUS
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003671ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
Dhanashri Atre52f71332016-08-22 12:12:36 -07003672 bool per_vdev, bool response_expected)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003673{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003674 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003675 struct ol_txrx_pdev_t *pdev = vdev->pdev;
jitiphil335d2412018-06-07 22:49:24 +05303676 uint8_t cookie = FW_STATS_DESC_POOL_SIZE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003677 struct ol_txrx_stats_req_internal *non_volatile_req;
jitiphil335d2412018-06-07 22:49:24 +05303678 struct ol_txrx_fw_stats_desc_t *desc = NULL;
3679 struct ol_txrx_fw_stats_desc_elem_t *elem = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003680
3681 if (!pdev ||
3682 req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
3683 req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
3684 return A_ERROR;
3685 }
3686
3687 /*
3688 * Allocate a non-transient stats request object.
3689 * (The one provided as an argument is likely allocated on the stack.)
3690 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303691 non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003692 if (!non_volatile_req)
3693 return A_NO_MEMORY;
3694
3695 /* copy the caller's specifications */
3696 non_volatile_req->base = *req;
3697 non_volatile_req->serviced = 0;
3698 non_volatile_req->offset = 0;
tfyu9fcabd72017-09-26 17:46:48 +08003699 if (response_expected) {
jitiphil335d2412018-06-07 22:49:24 +05303700 desc = ol_txrx_fw_stats_desc_alloc(pdev);
3701 if (!desc) {
3702 qdf_mem_free(non_volatile_req);
3703 return A_ERROR;
3704 }
3705
3706 /* use the desc id as the cookie */
3707 cookie = desc->desc_id;
3708 desc->req = non_volatile_req;
tfyu9fcabd72017-09-26 17:46:48 +08003709 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3710 TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
3711 pdev->req_list_depth++;
3712 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
3713 }
3714
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003715 if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
3716 req->stats_type_upload_mask,
3717 req->stats_type_reset_mask,
3718 HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
3719 cookie)) {
tfyu9fcabd72017-09-26 17:46:48 +08003720 if (response_expected) {
3721 qdf_spin_lock_bh(&pdev->req_list_spinlock);
jitiphil335d2412018-06-07 22:49:24 +05303722 TAILQ_REMOVE(&pdev->req_list, non_volatile_req,
3723 req_list_elem);
tfyu9fcabd72017-09-26 17:46:48 +08003724 pdev->req_list_depth--;
3725 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
jitiphil335d2412018-06-07 22:49:24 +05303726 if (desc) {
3727 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.
3728 pool_lock);
3729 desc->req = NULL;
3730 elem = container_of(desc,
3731 struct ol_txrx_fw_stats_desc_elem_t,
3732 desc);
3733 elem->next =
3734 pdev->ol_txrx_fw_stats_desc_pool.freelist;
3735 pdev->ol_txrx_fw_stats_desc_pool.freelist = elem;
3736 qdf_spin_unlock_bh(&pdev->
3737 ol_txrx_fw_stats_desc_pool.
3738 pool_lock);
3739 }
tfyu9fcabd72017-09-26 17:46:48 +08003740 }
3741
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303742 qdf_mem_free(non_volatile_req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003743 return A_ERROR;
3744 }
3745
Nirav Shahd2310422016-01-21 18:58:06 +05303746 if (response_expected == false)
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303747 qdf_mem_free(non_volatile_req);
Nirav Shahd2310422016-01-21 18:58:06 +05303748
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003749 return A_OK;
3750}
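
/*
 * Request-construction sketch (hedged example, compiled out; only the
 * wrapper name is invented - the fields are the ones consumed above):
 * upload one stats type, reset nothing, print rather than copy out, and
 * do not wait for a response.
 */
#ifdef currently_unused
static A_STATUS ol_txrx_fw_stats_get_example(struct cdp_vdev *pvdev)
{
	struct ol_txrx_stats_req req = { 0 };

	req.stats_type_upload_mask = 1 << HTT_DBG_STATS_WAL_PDEV_TXRX;
	req.stats_type_reset_mask = 0;
	req.print.verbose = 1;

	return ol_txrx_fw_stats_get(pvdev, &req, false /* per_vdev */,
				    false /* response_expected */);
}
#endif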
Dhanashri Atre12a08392016-02-17 13:10:34 -08003751
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003752void
3753ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
jitiphil335d2412018-06-07 22:49:24 +05303754 uint8_t cookie, uint8_t *stats_info_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003755{
3756 enum htt_dbg_stats_type type;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07003757 enum htt_cmn_dbg_stats_type cmn_type = HTT_DBG_CMN_NUM_STATS_INVALID;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003758 enum htt_dbg_stats_status status;
3759 int length;
3760 uint8_t *stats_data;
tfyu9fcabd72017-09-26 17:46:48 +08003761 struct ol_txrx_stats_req_internal *req, *tmp;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003762 int more = 0;
tfyu9fcabd72017-09-26 17:46:48 +08003763 int found = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003764
jitiphil335d2412018-06-07 22:49:24 +05303765 if (cookie >= FW_STATS_DESC_POOL_SIZE) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303766 ol_txrx_err("Cookie is not valid");
jitiphil335d2412018-06-07 22:49:24 +05303767 return;
3768 }
3769 req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie);
3770 if (!req) {
 3771		ol_txrx_err("Request not retrieved for cookie %u",
 3772			    (uint8_t)cookie);
3773 return;
3774 }
tfyu9fcabd72017-09-26 17:46:48 +08003775 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3776 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
3777 if (req == tmp) {
3778 found = 1;
3779 break;
3780 }
3781 }
3782 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
3783
3784 if (!found) {
3785 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05303786 "req(%pK) from firmware can't be found in the list\n", req);
tfyu9fcabd72017-09-26 17:46:48 +08003787 return;
3788 }
3789
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003790 do {
3791 htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
3792 &length, &stats_data);
3793 if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
3794 break;
3795 if (status == HTT_DBG_STATS_STATUS_PRESENT ||
3796 status == HTT_DBG_STATS_STATUS_PARTIAL) {
3797 uint8_t *buf;
3798 int bytes = 0;
3799
3800 if (status == HTT_DBG_STATS_STATUS_PARTIAL)
3801 more = 1;
3802 if (req->base.print.verbose || req->base.print.concise)
3803 /* provide the header along with the data */
3804 htt_t2h_stats_print(stats_info_list,
3805 req->base.print.concise);
3806
3807 switch (type) {
3808 case HTT_DBG_STATS_WAL_PDEV_TXRX:
3809 bytes = sizeof(struct wlan_dbg_stats);
3810 if (req->base.copy.buf) {
3811 int lmt;
3812
3813 lmt = sizeof(struct wlan_dbg_stats);
3814 if (req->base.copy.byte_limit < lmt)
3815 lmt = req->base.copy.byte_limit;
3816 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303817 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003818 }
3819 break;
3820 case HTT_DBG_STATS_RX_REORDER:
3821 bytes = sizeof(struct rx_reorder_stats);
3822 if (req->base.copy.buf) {
3823 int lmt;
3824
3825 lmt = sizeof(struct rx_reorder_stats);
3826 if (req->base.copy.byte_limit < lmt)
3827 lmt = req->base.copy.byte_limit;
3828 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303829 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003830 }
3831 break;
3832 case HTT_DBG_STATS_RX_RATE_INFO:
3833 bytes = sizeof(wlan_dbg_rx_rate_info_t);
3834 if (req->base.copy.buf) {
3835 int lmt;
3836
3837 lmt = sizeof(wlan_dbg_rx_rate_info_t);
3838 if (req->base.copy.byte_limit < lmt)
3839 lmt = req->base.copy.byte_limit;
3840 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303841 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003842 }
3843 break;
3844
3845 case HTT_DBG_STATS_TX_RATE_INFO:
3846 bytes = sizeof(wlan_dbg_tx_rate_info_t);
3847 if (req->base.copy.buf) {
3848 int lmt;
3849
3850 lmt = sizeof(wlan_dbg_tx_rate_info_t);
3851 if (req->base.copy.byte_limit < lmt)
3852 lmt = req->base.copy.byte_limit;
3853 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303854 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003855 }
3856 break;
3857
3858 case HTT_DBG_STATS_TX_PPDU_LOG:
3859 bytes = 0;
3860 /* TO DO: specify how many bytes are present */
3861 /* TO DO: add copying to the requestor's buf */
3862
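			/* note: no break here - control falls through to the
			 * RX_REMOTE_RING_BUFFER_INFO case below
			 */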
3863 case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07003864 bytes = sizeof(struct
3865 rx_remote_buffer_mgmt_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003866 if (req->base.copy.buf) {
3867 int limit;
3868
Yun Parkeaea8632017-04-09 09:53:45 -07003869 limit = sizeof(struct
3870 rx_remote_buffer_mgmt_stats);
3871 if (req->base.copy.byte_limit < limit)
3872 limit = req->base.copy.
3873 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003874 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303875 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003876 }
3877 break;
3878
3879 case HTT_DBG_STATS_TXBF_INFO:
3880 bytes = sizeof(struct wlan_dbg_txbf_data_stats);
3881 if (req->base.copy.buf) {
3882 int limit;
3883
Yun Parkeaea8632017-04-09 09:53:45 -07003884 limit = sizeof(struct
3885 wlan_dbg_txbf_data_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003886 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003887 limit = req->base.copy.
3888 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003889 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303890 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003891 }
3892 break;
3893
3894 case HTT_DBG_STATS_SND_INFO:
3895 bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
3896 if (req->base.copy.buf) {
3897 int limit;
3898
Yun Parkeaea8632017-04-09 09:53:45 -07003899 limit = sizeof(struct
3900 wlan_dbg_txbf_snd_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003901 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003902 limit = req->base.copy.
3903 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003904 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303905 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003906 }
3907 break;
3908
3909 case HTT_DBG_STATS_TX_SELFGEN_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07003910 bytes = sizeof(struct
3911 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003912 if (req->base.copy.buf) {
3913 int limit;
3914
Yun Parkeaea8632017-04-09 09:53:45 -07003915 limit = sizeof(struct
3916 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003917 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003918 limit = req->base.copy.
3919 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003920 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303921 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003922 }
3923 break;
3924
3925 case HTT_DBG_STATS_ERROR_INFO:
3926 bytes =
3927 sizeof(struct wlan_dbg_wifi2_error_stats);
3928 if (req->base.copy.buf) {
3929 int limit;
3930
Yun Parkeaea8632017-04-09 09:53:45 -07003931 limit = sizeof(struct
3932 wlan_dbg_wifi2_error_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003933 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003934 limit = req->base.copy.
3935 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003936 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303937 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003938 }
3939 break;
3940
3941 case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
3942 bytes =
3943 sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
3944 if (req->base.copy.buf) {
3945 int limit;
3946
3947 limit = sizeof(struct
3948 rx_txbf_musu_ndpa_pkts_stats);
3949 if (req->base.copy.byte_limit < limit)
3950 limit =
3951 req->base.copy.byte_limit;
3952 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303953 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003954 }
3955 break;
3956
3957 default:
3958 break;
3959 }
Yun Parkeaea8632017-04-09 09:53:45 -07003960 buf = req->base.copy.buf ?
3961 req->base.copy.buf : stats_data;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07003962
3963 /* Not implemented for MCL */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003964 if (req->base.callback.fp)
3965 req->base.callback.fp(req->base.callback.ctxt,
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07003966 cmn_type, buf, bytes);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003967 }
3968 stats_info_list += length;
3969 } while (1);
3970
3971 if (!more) {
tfyu9fcabd72017-09-26 17:46:48 +08003972 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3973 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
3974 if (req == tmp) {
3975 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
3976 pdev->req_list_depth--;
3977 qdf_mem_free(req);
3978 break;
3979 }
3980 }
3981 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003982 }
3983}
3984
3985#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
3986int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
3987{
3988 if (debug_specs & TXRX_DBG_MASK_OBJS) {
3989#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
3990 ol_txrx_pdev_display(vdev->pdev, 0);
3991#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303992 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303993 "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003994#endif
3995 }
Yun Parkeaea8632017-04-09 09:53:45 -07003996 if (debug_specs & TXRX_DBG_MASK_STATS)
Mohit Khannaca4173b2017-09-12 21:52:19 -07003997 ol_txrx_stats_display(vdev->pdev,
3998 QDF_STATS_VERBOSITY_LEVEL_HIGH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003999 if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
4000#if defined(ENABLE_TXRX_PROT_ANALYZE)
4001 ol_txrx_prot_ans_display(vdev->pdev);
4002#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304003 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304004 "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004005#endif
4006 }
4007 if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
4008#if defined(ENABLE_RX_REORDER_TRACE)
4009 ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
4010#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304011 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304012 "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004013#endif
4014
4015 }
4016 return 0;
4017}
4018#endif
4019
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004020#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004021int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
4022 int max_subfrms_ampdu, int max_subfrms_amsdu)
4023{
4024 return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
4025 max_subfrms_ampdu, max_subfrms_amsdu);
4026}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004027#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004028
4029#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4030void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
4031{
4032 struct ol_txrx_vdev_t *vdev;
4033
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304034 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004035 "%*s%s:\n", indent, " ", "txrx pdev");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304036 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004037 "%*spdev object: %pK", indent + 4, " ", pdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304038 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004039 "%*svdev list:", indent + 4, " ");
4040 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304041 ol_txrx_vdev_display(vdev, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004042 }
4043 ol_txrx_peer_find_display(pdev, indent + 4);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304044 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004045 "%*stx desc pool: %d elems @ %pK", indent + 4, " ",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004046 pdev->tx_desc.pool_size, pdev->tx_desc.array);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304047 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004048 htt_display(pdev->htt_pdev, indent);
4049}
4050
4051void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
4052{
4053 struct ol_txrx_peer_t *peer;
4054
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304055 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004056 "%*stxrx vdev: %pK\n", indent, " ", vdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304057 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004058 "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304059 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004060 "%*sMAC addr: %d:%d:%d:%d:%d:%d",
4061 indent + 4, " ",
4062 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
4063 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
4064 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304065 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004066 "%*speer list:", indent + 4, " ");
4067 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304068 ol_txrx_peer_display(peer, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004069 }
4070}
4071
4072void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
4073{
4074 int i;
4075
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304076 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004077 "%*stxrx peer: %pK", indent, " ", peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004078 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
4079 if (peer->peer_ids[i] != HTT_INVALID_PEER) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304080 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004081 "%*sID: %d", indent + 4, " ",
4082 peer->peer_ids[i]);
4083 }
4084 }
4085}
4086#endif /* TXRX_DEBUG_LEVEL */
4087
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004088/**
 4089 * ol_txrx_stats() - write OL TXRX queue stats for a vdev into a buffer
 4090 * @vdev_id: ID of the vdev whose stats are requested
 4091 * @buffer: pointer to the output buffer
 4092 * @buf_len: length of the output buffer
 4093 *
 4094 * Return: number of characters written to the buffer
4095 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004096static int
Yun Parkeaea8632017-04-09 09:53:45 -07004097ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004098{
4099 uint32_t len = 0;
4100
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004101 struct ol_txrx_vdev_t *vdev =
4102 (struct ol_txrx_vdev_t *)
4103 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004104
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004105 if (!vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304106 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304107 "%s: vdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004108 snprintf(buffer, buf_len, "vdev not found");
4109 return len;
4110 }
4111
4112 len = scnprintf(buffer, buf_len,
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004113 "\n\nTXRX stats:\nllQueue State : %s\npause %u unpause %u\noverflow %u\nllQueue timer state : %s",
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304114 ((vdev->ll_pause.is_q_paused == false) ?
4115 "UNPAUSED" : "PAUSED"),
4116 vdev->ll_pause.q_pause_cnt,
4117 vdev->ll_pause.q_unpause_cnt,
4118 vdev->ll_pause.q_overflow_cnt,
4119 ((vdev->ll_pause.is_q_timer_on == false)
4120 ? "NOT-RUNNING" : "RUNNING"));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004121 return len;
4122}
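
/*
 * Caller sketch (hypothetical, compiled out): the caller owns the
 * buffer; the return value is the number of characters written by
 * scnprintf(), or zero when the vdev lookup fails.
 */
#ifdef currently_unused
static void ol_txrx_stats_example(uint8_t vdev_id)
{
	char buf[256];

	if (ol_txrx_stats(vdev_id, buf, sizeof(buf)))
		txrx_nofl_info("%s", buf);
}
#endif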
4123
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004124#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
4125/**
4126 * ol_txrx_disp_peer_cached_bufq_stats() - display peer cached_bufq stats
4127 * @peer: peer pointer
4128 *
4129 * Return: None
4130 */
4131static void ol_txrx_disp_peer_cached_bufq_stats(struct ol_txrx_peer_t *peer)
4132{
Nirav Shahe6194ac2018-07-13 11:04:41 +05304133 txrx_nofl_info("cached_bufq: curr %d drops %d hwm %d whatifs %d thresh %d",
4134 peer->bufq_info.curr,
4135 peer->bufq_info.dropped,
4136 peer->bufq_info.high_water_mark,
4137 peer->bufq_info.qdepth_no_thresh,
4138 peer->bufq_info.thresh);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004139}
4140
4141/**
4142 * ol_txrx_disp_peer_stats() - display peer stats
4143 * @pdev: pdev pointer
4144 *
4145 * Return: None
4146 */
4147static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4148{ int i;
4149 struct ol_txrx_peer_t *peer;
4150 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
4151
4152 if (osc && hif_is_load_or_unload_in_progress(HIF_GET_SOFTC(osc)))
4153 return;
4154
4155 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
Manjunathappa Prakasha4272ab2018-09-17 11:39:44 -07004156 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004157 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4158 peer = pdev->local_peer_ids.map[i];
Frank Liu4362e462018-01-16 11:51:55 +08004159 if (peer) {
Mohit Khannab7bec722017-11-10 11:43:44 -08004160 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Frank Liu4362e462018-01-16 11:51:55 +08004161 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004162 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Manjunathappa Prakasha4272ab2018-09-17 11:39:44 -07004163 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004164
4165 if (peer) {
Nirav Shahe6194ac2018-07-13 11:04:41 +05304166 txrx_nofl_info("stats: peer 0x%pK local peer id %d",
4167 peer, i);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004168 ol_txrx_disp_peer_cached_bufq_stats(peer);
Mohit Khannab7bec722017-11-10 11:43:44 -08004169 ol_txrx_peer_release_ref(peer,
4170 PEER_DEBUG_ID_OL_INTERNAL);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004171 }
4172 }
4173}
4174#else
4175static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4176{
Nirav Shahe6194ac2018-07-13 11:04:41 +05304177 txrx_nofl_info("peer stats not supported w/o QCA_SUPPORT_TXRX_LOCAL_PEER_ID");
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004178}
4179#endif
4180
Mohit Khannaca4173b2017-09-12 21:52:19 -07004181void ol_txrx_stats_display(ol_txrx_pdev_handle pdev,
4182 enum qdf_stats_verbosity_level level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004183{
Mohit Khannaca4173b2017-09-12 21:52:19 -07004184 u64 tx_dropped =
4185 pdev->stats.pub.tx.dropped.download_fail.pkts
4186 + pdev->stats.pub.tx.dropped.target_discard.pkts
4187 + pdev->stats.pub.tx.dropped.no_ack.pkts
4188 + pdev->stats.pub.tx.dropped.others.pkts;
4189
4190 if (level == QDF_STATS_VERBOSITY_LEVEL_LOW) {
Nirav Shahe6194ac2018-07-13 11:04:41 +05304191 txrx_nofl_dbg("STATS |%u %u|TX: %lld tso %lld ok %lld drops(%u-%lld %u-%lld %u-%lld ?-%lld hR-%lld)|RX: %lld drops(E %lld PI %lld ME %lld) fwd(S %d F %d SF %d)|",
4192 pdev->tx_desc.num_free,
4193 pdev->tx_desc.pool_size,
4194 pdev->stats.pub.tx.from_stack.pkts,
4195 pdev->stats.pub.tx.tso.tso_pkts.pkts,
4196 pdev->stats.pub.tx.delivered.pkts,
4197 htt_tx_status_download_fail,
4198 pdev->stats.pub.tx.dropped.download_fail.pkts,
4199 htt_tx_status_discard,
4200 pdev->stats.pub.tx.dropped.
4201 target_discard.pkts,
4202 htt_tx_status_no_ack,
4203 pdev->stats.pub.tx.dropped.no_ack.pkts,
4204 pdev->stats.pub.tx.dropped.others.pkts,
4205 pdev->stats.pub.tx.dropped.host_reject.pkts,
4206 pdev->stats.pub.rx.delivered.pkts,
4207 pdev->stats.pub.rx.dropped_err.pkts,
4208 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4209 pdev->stats.pub.rx.dropped_mic_err.pkts,
4210 pdev->stats.pub.rx.intra_bss_fwd.
4211 packets_stack,
4212 pdev->stats.pub.rx.intra_bss_fwd.
4213 packets_fwd,
4214 pdev->stats.pub.rx.intra_bss_fwd.
4215 packets_stack_n_fwd);
Mohit Khannaca4173b2017-09-12 21:52:19 -07004216 return;
4217 }
4218
Nirav Shahe6194ac2018-07-13 11:04:41 +05304219 txrx_nofl_info("TX PATH Statistics:");
4220 txrx_nofl_info("sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
4221 pdev->stats.pub.tx.from_stack.pkts,
4222 pdev->stats.pub.tx.from_stack.bytes,
4223 pdev->stats.pub.tx.dropped.host_reject.pkts,
4224 pdev->stats.pub.tx.dropped.host_reject.bytes,
4225 tx_dropped,
4226 pdev->stats.pub.tx.dropped.download_fail.bytes
4227 + pdev->stats.pub.tx.dropped.target_discard.bytes
4228 + pdev->stats.pub.tx.dropped.no_ack.bytes);
4229 txrx_nofl_info("successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B) others: %lld (%lld B)",
4230 pdev->stats.pub.tx.delivered.pkts,
4231 pdev->stats.pub.tx.delivered.bytes,
4232 pdev->stats.pub.tx.dropped.download_fail.pkts,
4233 pdev->stats.pub.tx.dropped.download_fail.bytes,
4234 pdev->stats.pub.tx.dropped.target_discard.pkts,
4235 pdev->stats.pub.tx.dropped.target_discard.bytes,
4236 pdev->stats.pub.tx.dropped.no_ack.pkts,
4237 pdev->stats.pub.tx.dropped.no_ack.bytes,
4238 pdev->stats.pub.tx.dropped.others.pkts,
4239 pdev->stats.pub.tx.dropped.others.bytes);
4240 txrx_nofl_info("Tx completions per HTT message:\n"
4241 "Single Packet %d\n"
4242 " 2-10 Packets %d\n"
4243 "11-20 Packets %d\n"
4244 "21-30 Packets %d\n"
4245 "31-40 Packets %d\n"
4246 "41-50 Packets %d\n"
4247 "51-60 Packets %d\n"
4248 " 60+ Packets %d\n",
4249 pdev->stats.pub.tx.comp_histogram.pkts_1,
4250 pdev->stats.pub.tx.comp_histogram.pkts_2_10,
4251 pdev->stats.pub.tx.comp_histogram.pkts_11_20,
4252 pdev->stats.pub.tx.comp_histogram.pkts_21_30,
4253 pdev->stats.pub.tx.comp_histogram.pkts_31_40,
4254 pdev->stats.pub.tx.comp_histogram.pkts_41_50,
4255 pdev->stats.pub.tx.comp_histogram.pkts_51_60,
4256 pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
Nirav Shahda008342016-05-17 18:50:40 +05304257
Nirav Shahe6194ac2018-07-13 11:04:41 +05304258 txrx_nofl_info("RX PATH Statistics:");
4259 txrx_nofl_info("%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
4260 "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
4261 "msdus with frag_ind: %d msdus with offload_ind: %d",
4262 pdev->stats.priv.rx.normal.ppdus,
4263 pdev->stats.priv.rx.normal.mpdus,
4264 pdev->stats.pub.rx.delivered.pkts,
4265 pdev->stats.pub.rx.delivered.bytes,
4266 pdev->stats.pub.rx.dropped_err.pkts,
4267 pdev->stats.pub.rx.dropped_err.bytes,
4268 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4269 pdev->stats.pub.rx.dropped_peer_invalid.bytes,
4270 pdev->stats.pub.rx.dropped_mic_err.pkts,
4271 pdev->stats.pub.rx.dropped_mic_err.bytes,
4272 pdev->stats.pub.rx.msdus_with_frag_ind,
4273 pdev->stats.pub.rx.msdus_with_offload_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004274
Nirav Shahe6194ac2018-07-13 11:04:41 +05304275 txrx_nofl_info(" fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
4276 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4277 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4278 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304279
Nirav Shahe6194ac2018-07-13 11:04:41 +05304280 txrx_nofl_info("packets per HTT message:\n"
4281 "Single Packet %d\n"
4282 " 2-10 Packets %d\n"
4283 "11-20 Packets %d\n"
4284 "21-30 Packets %d\n"
4285 "31-40 Packets %d\n"
4286 "41-50 Packets %d\n"
4287 "51-60 Packets %d\n"
4288 " 60+ Packets %d\n",
4289 pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4290 pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4291 pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4292 pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4293 pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4294 pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4295 pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4296 pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004297
4298 ol_txrx_disp_peer_stats(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004299}
4300
4301void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4302{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304303 qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004304}
4305
4306#if defined(ENABLE_TXRX_PROT_ANALYZE)
4307
4308void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4309{
4310 ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4311 ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4312}
4313
4314#endif /* ENABLE_TXRX_PROT_ANALYZE */
4315
4316#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
4317int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4318{
4319 return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4320 OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4321}
4322#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4323
4324#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
4325A_STATUS
4326ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4327 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4328{
Anurag Chouhanc5548422016-02-24 18:33:27 +05304329 qdf_assert(pdev && peer && stats);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304330 qdf_spin_lock_bh(&pdev->peer_stat_mutex);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304331 qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304332 qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004333 return A_OK;
4334}
4335#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4336
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004337static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *pvdev, bool val)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004338{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004339 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004340
Jeff Johnson6795c3a2019-03-18 13:43:04 -07004341 if (!vdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004342 return;
4343
4344 vdev->disable_intrabss_fwd = val;
4345}
4346
Nirav Shahc657ef52016-07-26 14:22:38 +05304347/**
4348 * ol_txrx_update_mac_id() - update mac_id for vdev
4349 * @vdev_id: vdev id
4350 * @mac_id: mac id
4351 *
4352 * Return: none
4353 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004354static void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id)
Nirav Shahc657ef52016-07-26 14:22:38 +05304355{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004356 struct ol_txrx_vdev_t *vdev =
4357 (struct ol_txrx_vdev_t *)
4358 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Nirav Shahc657ef52016-07-26 14:22:38 +05304359
Jeff Johnson6795c3a2019-03-18 13:43:04 -07004360 if (!vdev) {
Nirav Shahc657ef52016-07-26 14:22:38 +05304361 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4362 "%s: Invalid vdev_id %d", __func__, vdev_id);
4363 return;
4364 }
4365 vdev->mac_id = mac_id;
4366}
4367
Alok Kumar75355aa2018-03-19 17:32:58 +05304368/**
 4369 * ol_txrx_get_tx_ack_stats() - get tx ack count
Sravan Kumar Kairam53b43e12019-04-19 22:13:09 +05304370 * @pdev: pdev reference
Alok Kumar75355aa2018-03-19 17:32:58 +05304371 * @vdev_id: vdev_id
4372 *
4373 * Return: tx ack count
4374 */
Sravan Kumar Kairam53b43e12019-04-19 22:13:09 +05304375static uint32_t ol_txrx_get_tx_ack_stats(struct cdp_pdev *pdev,
4376 uint8_t vdev_id)
Alok Kumar75355aa2018-03-19 17:32:58 +05304377{
4378 struct ol_txrx_vdev_t *vdev =
4379 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
4380 if (!vdev) {
4381 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4382 "%s: Invalid vdev_id %d", __func__, vdev_id);
4383 return 0;
4384 }
4385 return vdev->txrx_stats.txack_success;
4386}
4387
Leo Chang8e073612015-11-13 10:55:34 -08004388/**
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004389 * ol_txrx_display_stats() - display OL TXRX stats
 * @soc: soc handle (unused by this implementation)
 * @value: module id for which stats need to be displayed
 * @verb_level: verbosity level for the stats output
Nirav Shahda008342016-05-17 18:50:40 +05304391 *
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004392 * Return: status
Nirav Shahda008342016-05-17 18:50:40 +05304393 */
Mohit Khannaca4173b2017-09-12 21:52:19 -07004394static QDF_STATUS
4395ol_txrx_display_stats(void *soc, uint16_t value,
4396 enum qdf_stats_verbosity_level verb_level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004397{
4398 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004399 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004400
Anurag Chouhan6d760662016-02-20 16:05:43 +05304401 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004402 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304403 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304404 "%s: pdev is NULL", __func__);
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004405 return QDF_STATUS_E_NULL_VALUE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004406 }
4407
4408 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004409 case CDP_TXRX_PATH_STATS:
Mohit Khannaca4173b2017-09-12 21:52:19 -07004410 ol_txrx_stats_display(pdev, verb_level);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004411 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004412 case CDP_TXRX_TSO_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004413 ol_txrx_stats_display_tso(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004414 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004415 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07004416 ol_tx_dump_flow_pool_info((void *)pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004417 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004418 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304419 qdf_nbuf_tx_desc_count_display();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004420 break;
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -08004421 case CDP_WLAN_RX_BUF_DEBUG_STATS:
4422 htt_display_rx_buf_debug(pdev->htt_pdev);
4423 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304424#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004425 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304426 ol_tx_sched_cur_state_display(pdev);
4427 ol_tx_sched_stats_display(pdev);
4428 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004429 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304430 ol_tx_queue_log_display(pdev);
4431 break;
4432#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004433 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304434 ol_tx_dump_group_credit_stats(pdev);
4435 break;
4436#endif
4437
4438#ifdef DEBUG_HL_LOGGING
Nirav Shaheb017be2018-02-15 11:20:58 +05304439 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304440 htt_dump_bundle_stats(pdev->htt_pdev);
4441 break;
4442#endif
4443#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004444 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004445 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004446 break;
4447 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004448 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004449}
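
/*
 * Invocation sketch (illustrative, compiled out): dump the tx/rx path
 * counters at low verbosity. The soc argument is not used by this
 * implementation, so NULL is passed in the sketch.
 */
#ifdef currently_unused
static void ol_txrx_display_stats_example(void)
{
	ol_txrx_display_stats(NULL, CDP_TXRX_PATH_STATS,
			      QDF_STATS_VERBOSITY_LEVEL_LOW);
}
#endif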
4450
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004451/**
4452 * ol_txrx_clear_stats() - Clear OL TXRX stats
4453 * @value: Module id for which stats needs to be cleared
4454 *
4455 * Return: None
4456 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004457static void ol_txrx_clear_stats(uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004458{
4459 ol_txrx_pdev_handle pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004461
Anurag Chouhan6d760662016-02-20 16:05:43 +05304462 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004463 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304464 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304465 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004466 return;
4467 }
4468
4469 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004470 case CDP_TXRX_PATH_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004471 ol_txrx_stats_clear(pdev);
4472 break;
Yun Park1027e8c2017-10-13 15:17:37 -07004473 case CDP_TXRX_TSO_STATS:
4474 ol_txrx_tso_stats_clear(pdev);
4475 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004476 case CDP_DUMP_TX_FLOW_POOL_INFO:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004477 ol_tx_clear_flow_pool_stats();
4478 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004479 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304480 qdf_nbuf_tx_desc_count_clear();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004481 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304482#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004483 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304484 ol_tx_sched_stats_clear(pdev);
4485 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004486 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304487 ol_tx_queue_log_clear(pdev);
4488 break;
4489#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004490 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304491 ol_tx_clear_group_credit_stats(pdev);
4492 break;
4493#endif
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004494 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304495 htt_clear_bundle_stats(pdev->htt_pdev);
4496 break;
4497#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004498 default:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004500 break;
4501 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004502
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004503}
4504
4505/**
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004506 * ol_txrx_drop_nbuf_list() - drop an nbuf list
 4507 * @buf_list: buffer list to be dropped
4508 *
4509 * Return: int (number of bufs dropped)
4510 */
4511static inline int ol_txrx_drop_nbuf_list(qdf_nbuf_t buf_list)
4512{
4513 int num_dropped = 0;
4514 qdf_nbuf_t buf, next_buf;
4515 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
4516
4517 buf = buf_list;
4518 while (buf) {
Himanshu Agarwaldd2196a2017-07-31 11:38:14 +05304519 QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004520 next_buf = qdf_nbuf_queue_next(buf);
4521 if (pdev)
4522 TXRX_STATS_MSDU_INCR(pdev,
4523 rx.dropped_peer_invalid, buf);
4524 qdf_nbuf_free(buf);
4525 buf = next_buf;
4526 num_dropped++;
4527 }
4528 return num_dropped;
4529}
4530
4531/**
Alok Kumarea3b23b2019-02-28 15:32:10 +05304532 * ol_rx_data_handler() - data rx handler
4533 * @pdev: dev handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004534 * @buf_list: buffer list
Nirav Shah36a87bf2016-02-22 12:38:46 +05304535 * @staid: Station id
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004536 *
4537 * Return: None
4538 */
Alok Kumarea3b23b2019-02-28 15:32:10 +05304539static void ol_rx_data_handler(struct ol_txrx_pdev_t *pdev,
4540 qdf_nbuf_t buf_list, uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004541{
Mohit Khanna0696eef2016-04-14 16:14:08 -07004542 void *osif_dev;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004543 uint8_t drop_count = 0;
Nirav Shahcbc6d722016-03-01 16:24:53 +05304544 qdf_nbuf_t buf, next_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304545 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004546 ol_txrx_rx_fp data_rx = NULL;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304547 struct ol_txrx_peer_t *peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004548
Jeff Johnsondac9e382017-09-24 10:36:08 -07004549 if (qdf_unlikely(!pdev))
Nirav Shah36a87bf2016-02-22 12:38:46 +05304550 goto free_buf;
4551
4552 /* Do not use peer directly. Derive peer from staid to
4553 * make sure that peer is valid.
4554 */
Jingxiang Ge3badb982018-01-02 17:39:01 +08004555 peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev,
4556 staid, PEER_DEBUG_ID_OL_RX_THREAD);
Nirav Shah36a87bf2016-02-22 12:38:46 +05304557 if (!peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004558 goto free_buf;
4559
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304560 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre50141c52016-04-07 13:15:29 -07004561 if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
4562 !peer->vdev->rx)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304563 qdf_spin_unlock_bh(&peer->peer_info_lock);
Jingxiang Ge9f297062018-01-24 13:31:31 +08004564 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004565 goto free_buf;
4566 }
Dhanashri Atre182b0272016-02-17 15:35:07 -08004567
4568 data_rx = peer->vdev->rx;
Mohit Khanna0696eef2016-04-14 16:14:08 -07004569 osif_dev = peer->vdev->osif_dev;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304570 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004571
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004572 qdf_spin_lock_bh(&peer->bufq_info.bufq_lock);
4573 if (!list_empty(&peer->bufq_info.cached_bufq)) {
4574 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004575 /* Flush the cached frames to HDD before passing new rx frame */
4576 ol_txrx_flush_rx_frames(peer, 0);
 4577	} else {
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004578		qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
	}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004579
Jingxiang Ge3badb982018-01-02 17:39:01 +08004580 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
4581
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004582 buf = buf_list;
4583 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304584 next_buf = qdf_nbuf_queue_next(buf);
4585 qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
Mohit Khanna0696eef2016-04-14 16:14:08 -07004586 ret = data_rx(osif_dev, buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304587 if (ret != QDF_STATUS_SUCCESS) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304588 ol_txrx_err("Frame Rx to HDD failed");
Nirav Shah6a4eee62016-04-25 10:15:04 +05304589 if (pdev)
4590 TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304591 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004592 }
4593 buf = next_buf;
4594 }
4595 return;
4596
4597free_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004598 drop_count = ol_txrx_drop_nbuf_list(buf_list);
Nirav Shah7c8c1712018-09-10 16:01:31 +05304599 ol_txrx_warn("Dropped frames %u", drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004600}
4601
Alok Kumarea3b23b2019-02-28 15:32:10 +05304602/**
4603 * ol_rx_data_cb() - data rx callback
4604 * @context: dev handle
4605 * @buf_list: buffer list
4606 * @staid: Station id
4607 *
4608 * Return: None
4609 */
4610static inline void
4611ol_rx_data_cb(void *context, qdf_nbuf_t buf_list, uint16_t staid)
4612{
4613 struct ol_txrx_pdev_t *pdev = context;
4614
4615 ol_rx_data_handler(pdev, buf_list, staid);
4616}
4617
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004618/* print for every 16th packet */
4619#define OL_TXRX_PRINT_RATE_LIMIT_THRESH 0x0f
4620struct ol_rx_cached_buf *cache_buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304621
4622/** helper function to drop packets
 4623 * Note: caller must hold the cached bufq lock before invoking
4624 * this function. Also, it assumes that the pointers passed in
4625 * are valid (non-NULL)
4626 */
4627static inline void ol_txrx_drop_frames(
4628 struct ol_txrx_cached_bufq_t *bufqi,
4629 qdf_nbuf_t rx_buf_list)
4630{
4631 uint32_t dropped = ol_txrx_drop_nbuf_list(rx_buf_list);
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004632
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304633 bufqi->dropped += dropped;
4634 bufqi->qdepth_no_thresh += dropped;
4635
4636 if (bufqi->qdepth_no_thresh > bufqi->high_water_mark)
4637 bufqi->high_water_mark = bufqi->qdepth_no_thresh;
4638}
4639
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004640static QDF_STATUS ol_txrx_enqueue_rx_frames(
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304641 struct ol_txrx_peer_t *peer,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004642 struct ol_txrx_cached_bufq_t *bufqi,
4643 qdf_nbuf_t rx_buf_list)
4644{
4645 struct ol_rx_cached_buf *cache_buf;
4646 qdf_nbuf_t buf, next_buf;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004647 static uint32_t count;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004648
4649 if ((count++ & OL_TXRX_PRINT_RATE_LIMIT_THRESH) == 0)
4650 ol_txrx_info_high(
4651 "Data on the peer before it is registered bufq->curr %d bufq->drops %d",
4652 bufqi->curr, bufqi->dropped);
4653
4654 qdf_spin_lock_bh(&bufqi->bufq_lock);
4655 if (bufqi->curr >= bufqi->thresh) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304656 ol_txrx_drop_frames(bufqi, rx_buf_list);
4657 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4658 return QDF_STATUS_E_FAULT;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004659 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004660 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4661
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004662 buf = rx_buf_list;
4663 while (buf) {
Sravan Kumar Kairamdd5a74a2019-01-11 17:32:49 +05304664 QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004665 next_buf = qdf_nbuf_queue_next(buf);
4666 cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
4667 if (!cache_buf) {
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004668 qdf_nbuf_free(buf);
4669 } else {
4670 /* Add NULL terminator */
4671 qdf_nbuf_set_next(buf, NULL);
4672 cache_buf->buf = buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304673 if (peer && peer->valid) {
4674 qdf_spin_lock_bh(&bufqi->bufq_lock);
4675 list_add_tail(&cache_buf->list,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004676 &bufqi->cached_bufq);
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304677 bufqi->curr++;
4678 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4679 } else {
4680 qdf_mem_free(cache_buf);
4681 rx_buf_list = buf;
4682 qdf_nbuf_set_next(rx_buf_list, next_buf);
4683 qdf_spin_lock_bh(&bufqi->bufq_lock);
4684 ol_txrx_drop_frames(bufqi, rx_buf_list);
4685 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4686 return QDF_STATUS_E_FAULT;
4687 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004688 }
4689 buf = next_buf;
4690 }
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304691 return QDF_STATUS_SUCCESS;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004692}


/**
 * ol_rx_data_process() - process rx frame
 * @peer: peer
 * @rx_buf_list: rx buffer list
 *
 * Return: None
 */
void ol_rx_data_process(struct ol_txrx_peer_t *peer,
			qdf_nbuf_t rx_buf_list)
{
	/*
	 * The firmware data-path-active response uses the shim RX thread;
	 * the T2H message runs in SIRQ context, and IPA kernel module
	 * APIs must not be called from SIRQ context.
	 */
	ol_txrx_rx_fp data_rx = NULL;
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if ((!peer) || (!pdev)) {
		ol_txrx_err("peer/pdev is NULL");
		goto drop_rx_buf;
	}

	qdf_assert(peer->vdev);

	qdf_spin_lock_bh(&peer->peer_info_lock);
	if (peer->state >= OL_TXRX_PEER_STATE_CONN)
		data_rx = peer->vdev->rx;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	/*
	 * If data frames arrive from the peer before it is registered
	 * for data service, enqueue them on the pending queue; the queue
	 * is flushed to HDD once the station is registered.
	 */
	if (!data_rx) {
		if (ol_txrx_enqueue_rx_frames(peer, &peer->bufq_info,
					      rx_buf_list)
		    != QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
				  "%s: failed to enqueue rx frm to cached_bufq",
				  __func__);
	} else {
#ifdef QCA_CONFIG_SMP
		/*
		 * If the kernel is SMP, schedule the rx thread to
		 * make better use of multiple cores.
		 */
		if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
			ol_rx_data_handler(pdev, rx_buf_list, peer->local_id);
		} else {
			p_cds_sched_context sched_ctx =
				get_cds_sched_ctxt();
			struct cds_ol_rx_pkt *pkt;

			if (unlikely(!sched_ctx))
				goto drop_rx_buf;

			pkt = cds_alloc_ol_rx_pkt(sched_ctx);
			if (!pkt)
				goto drop_rx_buf;

			pkt->callback = ol_rx_data_cb;
			pkt->context = pdev;
			pkt->Rxpkt = rx_buf_list;
			pkt->staId = peer->local_id;
			cds_indicate_rxpkt(sched_ctx, pkt);
		}
#else /* QCA_CONFIG_SMP */
		ol_rx_data_handler(pdev, rx_buf_list, peer->local_id);
#endif /* QCA_CONFIG_SMP */
	}

	return;

drop_rx_buf:
	ol_txrx_drop_nbuf_list(rx_buf_list);
}
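
/*
 * Example (illustrative sketch, not part of the driver): a caller in
 * the rx path that has already resolved the peer hands the whole nbuf
 * chain to ol_rx_data_process() and lets it pick the delivery path;
 * "local_id" is a hypothetical input here.
 *
 *	struct ol_txrx_peer_t *peer;
 *
 *	peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
 *					     local_id);
 *	if (peer)
 *		ol_rx_data_process(peer, rx_buf_list);
 *	else
 *		ol_txrx_drop_nbuf_list(rx_buf_list);
 *
 * If the peer's vdev->rx is still NULL (peer not yet registered), the
 * list lands in peer->bufq_info and is replayed by
 * ol_txrx_flush_rx_frames() at registration time.
 */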

/**
 * ol_txrx_register_peer() - register peer
 * @sta_desc: sta descriptor
 *
 * Return: QDF Status
 */
static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	union ol_txrx_peer_update_param_t param;
	struct privacy_exemption privacy_filter;

	if (!pdev) {
		ol_txrx_err("Pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
		ol_txrx_err("Invalid sta id :%d",
			    sta_desc->sta_id);
		return QDF_STATUS_E_INVAL;
	}

	peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
					     sta_desc->sta_id);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_CONN;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	param.qos_capable = sta_desc->is_qos_enabled;
	ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
			    ol_txrx_peer_update_qos_capable);

	if (sta_desc->is_wapi_supported) {
		/* Privacy filter to accept unencrypted WAI frames */
		privacy_filter.ether_type = ETHERTYPE_WAI;
		privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
		privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
		ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
	}

	ol_txrx_flush_rx_frames(peer, 0);
	return QDF_STATUS_SUCCESS;
}
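
/*
 * Example (illustrative sketch; "assoc_sta_id" is hypothetical): a
 * control-path caller such as HDD fills the descriptor and registers
 * once the station is associated.
 *
 *	struct ol_txrx_desc_type sta_desc = {0};
 *
 *	sta_desc.sta_id = assoc_sta_id;
 *	sta_desc.is_qos_enabled = 1;
 *	sta_desc.is_wapi_supported = 0;
 *	if (ol_txrx_register_peer(&sta_desc) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *
 * Registration moves the peer to OL_TXRX_PEER_STATE_CONN and replays
 * any rx frames cached while the peer was unregistered.
 */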

/**
 * ol_txrx_register_ocb_peer - Function to register the OCB peer
 * @mac_addr: MAC address of the self peer
 * @peer_id: Pointer to the peer ID
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
 */
static QDF_STATUS ol_txrx_register_ocb_peer(uint8_t *mac_addr,
					    uint8_t *peer_id)
{
	ol_txrx_pdev_handle pdev;
	ol_txrx_peer_handle peer;

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (!pdev) {
		ol_txrx_err("Unable to find pdev!");
		return QDF_STATUS_E_FAILURE;
	}

	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
					 mac_addr, peer_id);
	if (!peer) {
		ol_txrx_err("Unable to find OCB peer!");
		return QDF_STATUS_E_FAILURE;
	}

	ol_txrx_set_ocb_peer(pdev, peer);

	/* Set peer state to authenticated */
	ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
				  OL_TXRX_PEER_STATE_AUTH);

	return QDF_STATUS_SUCCESS;
}

/**
 * ol_txrx_set_ocb_peer - Function to store the OCB peer
 * @pdev: Handle to the HTT instance
 * @peer: Pointer to the peer
 */
void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
			  struct ol_txrx_peer_t *peer)
{
	if (!pdev)
		return;

	pdev->ocb_peer = peer;
	pdev->ocb_peer_valid = (NULL != peer);
}

/**
 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
 * @pdev: Handle to the HTT instance
 * @peer: Pointer to the returned peer
 *
 * Return: true if the peer is valid, false if not
 */
bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
			  struct ol_txrx_peer_t **peer)
{
	int rc;

	if ((!pdev) || (!peer)) {
		rc = false;
		goto exit;
	}

	if (pdev->ocb_peer_valid) {
		*peer = pdev->ocb_peer;
		rc = true;
	} else {
		rc = false;
	}

exit:
	return rc;
}
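
/*
 * Example (illustrative sketch): the pair above caches a single OCB
 * self-peer per pdev. A typical consumer does:
 *
 *	struct ol_txrx_peer_t *ocb_peer;
 *
 *	if (ol_txrx_get_ocb_peer(pdev, &ocb_peer))
 *		use ocb_peer ...
 *
 * Passing NULL to ol_txrx_set_ocb_peer() clears the cache, since
 * ocb_peer_valid is derived from the pointer itself.
 */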

/**
 * ol_txrx_register_pause_cb() - register pause callback
 * @soc: soc handle
 * @pause_cb: pause callback
 *
 * Return: QDF status
 */
static QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
					    tx_pause_callback pause_cb)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev || !pause_cb) {
		ol_txrx_err("pdev or pause_cb is NULL");
		return QDF_STATUS_E_INVAL;
	}
	pdev->pause_cb = pause_cb;
	return QDF_STATUS_SUCCESS;
}
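
/*
 * Example (illustrative sketch; the callback name and the exact shape
 * of the tx_pause_callback typedef shown here are assumptions): the
 * OS shim registers one pause handler per soc, which the flow-control
 * code later invokes via pdev->pause_cb.
 *
 *	static void osif_tx_pause_cb(uint8_t vdev_id,
 *				     enum netif_action_type action,
 *				     enum netif_reason_type reason)
 *	{
 *		pause or resume the netif queues of vdev_id ...
 *	}
 *
 *	ol_txrx_register_pause_cb(soc, osif_tx_pause_cb);
 */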
4917
#ifdef RECEIVE_OFFLOAD
/**
 * ol_txrx_offld_flush_handler() - offld flush handler
 * @context: dev handle
 * @rxpkt: rx data
 * @staid: station id
 *
 * This function handles an offld flush indication.
 * If the rx thread is enabled, it will be invoked by the rx
 * thread; else it will be called in the tasklet context.
 *
 * Return: none
 */
static void ol_txrx_offld_flush_handler(void *context,
					qdf_nbuf_t rxpkt,
					uint16_t staid)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (qdf_unlikely(!pdev)) {
		ol_txrx_err("Invalid context");
		qdf_assert(0);
		return;
	}

	if (pdev->offld_flush_cb)
		pdev->offld_flush_cb(context);
	else
		ol_txrx_err("offld_flush_cb NULL");
}

/**
 * ol_txrx_offld_flush() - offld flush callback
 * @data: opaque data pointer
 *
 * This is the callback registered with CE to trigger
 * an offld flush.
 *
 * Return: none
 */
static void ol_txrx_offld_flush(void *data)
{
	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
	struct cds_ol_rx_pkt *pkt;
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (qdf_unlikely(!sched_ctx))
		return;

	if (qdf_unlikely(!pdev)) {
		ol_txrx_err("TXRX module context is NULL");
		return;
	}

	if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
		ol_txrx_offld_flush_handler(data, NULL, 0);
	} else {
		pkt = cds_alloc_ol_rx_pkt(sched_ctx);
		if (qdf_unlikely(!pkt))
			return;

		pkt->callback = ol_txrx_offld_flush_handler;
		pkt->context = data;
		pkt->Rxpkt = NULL;
		pkt->staId = 0;
		cds_indicate_rxpkt(sched_ctx, pkt);
	}
}

/**
 * ol_register_offld_flush_cb() - register the offld flush callback
 * @offld_flush_cb: flush callback function
 *
 * Store the offld flush callback provided and in turn
 * register OL's offld flush handler with CE.
 *
 * Return: none
 */
static void ol_register_offld_flush_cb(void (offld_flush_cb)(void *))
{
	struct hif_opaque_softc *hif_device;
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err("pdev NULL!");
		TXRX_ASSERT2(0);
		goto out;
	}
	if (pdev->offld_flush_cb) {
		ol_txrx_info("offld already initialised");
		if (pdev->offld_flush_cb != offld_flush_cb) {
			ol_txrx_err(
				"offld_flush_cb differs from previously registered callback");
			TXRX_ASSERT2(0);
			goto out;
		}
		goto out;
	}
	pdev->offld_flush_cb = offld_flush_cb;
	hif_device = cds_get_context(QDF_MODULE_ID_HIF);

	if (qdf_unlikely(!hif_device)) {
		ol_txrx_err("hif_device NULL!");
		qdf_assert(0);
		goto out;
	}

	hif_offld_flush_cb_register(hif_device, ol_txrx_offld_flush);

out:
	return;
}

/**
 * ol_deregister_offld_flush_cb() - deregister the offld flush callback
 *
 * Remove the offld flush callback provided and in turn
 * deregister OL's offld flush handler with CE.
 *
 * Return: none
 */
static void ol_deregister_offld_flush_cb(void)
{
	struct hif_opaque_softc *hif_device;
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err("pdev NULL!");
		return;
	}
	hif_device = cds_get_context(QDF_MODULE_ID_HIF);

	if (qdf_unlikely(!hif_device)) {
		ol_txrx_err("hif_device NULL!");
		qdf_assert(0);
		return;
	}

	hif_offld_flush_cb_deregister(hif_device);

	pdev->offld_flush_cb = NULL;
}
#endif /* RECEIVE_OFFLOAD */

/**
 * ol_register_data_stall_detect_cb() - register data stall callback
 * @data_stall_detect_callback: data stall callback function
 *
 * Return: QDF_STATUS Enumeration
 */
static QDF_STATUS ol_register_data_stall_detect_cb(
			data_stall_detect_cb data_stall_detect_callback)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err("pdev NULL!");
		return QDF_STATUS_E_INVAL;
	}
	pdev->data_stall_detect_callback = data_stall_detect_callback;
	return QDF_STATUS_SUCCESS;
}

/**
 * ol_deregister_data_stall_detect_cb() - de-register data stall callback
 * @data_stall_detect_callback: data stall callback function
 *
 * Return: QDF_STATUS Enumeration
 */
static QDF_STATUS ol_deregister_data_stall_detect_cb(
			data_stall_detect_cb data_stall_detect_callback)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err("pdev NULL!");
		return QDF_STATUS_E_INVAL;
	}
	pdev->data_stall_detect_callback = NULL;
	return QDF_STATUS_SUCCESS;
}

/**
 * ol_txrx_post_data_stall_event() - post data stall event
 * @indicator: Module triggering data stall
 * @data_stall_type: data stall event type
 * @pdev_id: pdev id
 * @vdev_id_bitmap: vdev id bitmap
 * @recovery_type: data stall recovery type
 *
 * Return: None
 */
static void ol_txrx_post_data_stall_event(
			enum data_stall_log_event_indicator indicator,
			enum data_stall_log_event_type data_stall_type,
			uint32_t pdev_id, uint32_t vdev_id_bitmap,
			enum data_stall_log_recovery_type recovery_type)
{
	struct scheduler_msg msg = {0};
	QDF_STATUS status;
	struct data_stall_event_info *data_stall_info;
	ol_txrx_pdev_handle pdev;

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: pdev is NULL.", __func__);
		return;
	}
	data_stall_info = qdf_mem_malloc(sizeof(*data_stall_info));
	if (!data_stall_info)
		return;

	data_stall_info->indicator = indicator;
	data_stall_info->data_stall_type = data_stall_type;
	data_stall_info->vdev_id_bitmap = vdev_id_bitmap;
	data_stall_info->pdev_id = pdev_id;
	data_stall_info->recovery_type = recovery_type;

	if (data_stall_info->data_stall_type ==
				DATA_STALL_LOG_FW_RX_REFILL_FAILED)
		htt_log_rx_ring_info(pdev->htt_pdev);

	sys_build_message_header(SYS_MSG_ID_DATA_STALL_MSG, &msg);
	/* Save callback and data */
	msg.callback = pdev->data_stall_detect_callback;
	msg.bodyptr = data_stall_info;
	msg.bodyval = 0;

	status = scheduler_post_message(QDF_MODULE_ID_TXRX,
					QDF_MODULE_ID_HDD,
					QDF_MODULE_ID_SYS, &msg);

	if (status != QDF_STATUS_SUCCESS)
		qdf_mem_free(data_stall_info);
}
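
/*
 * Example (illustrative sketch; the indicator and recovery enum
 * values are placeholders, only DATA_STALL_LOG_FW_RX_REFILL_FAILED is
 * taken from the code above): a detector that sees the FW rx refill
 * ring stuck on pdev 0 / vdev 0 could post:
 *
 *	ol_txrx_post_data_stall_event(
 *			DATA_STALL_LOG_INDICATOR_FIRMWARE,
 *			DATA_STALL_LOG_FW_RX_REFILL_FAILED,
 *			0, 1 << 0,
 *			DATA_STALL_LOG_RECOVERY_TRIGGER_PDR);
 *
 * The event body travels as msg.bodyptr and is freed here only if
 * scheduler_post_message() fails; otherwise the receiver owns it.
 */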

void
ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
{
	qdf_print(" Pkt: VA 0x%pK PA 0x%llx len %d\n",
		  qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
	print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
		       qdf_nbuf_data(nbuf), len, true);
}

/**
 * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
 * @vdev_id: vdev_id
 *
 * Return: vdev handle
 *         NULL if not found.
 */
struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	ol_txrx_vdev_handle vdev = NULL;

	if (qdf_unlikely(!pdev))
		return NULL;

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->vdev_id == vdev_id)
			break;
	}

	return (struct cdp_vdev *)vdev;
}
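
/*
 * Example (illustrative sketch; "evt_vdev_id" is hypothetical):
 * callers that only hold a vdev id, e.g. per-vdev event handlers,
 * recover the handle like this:
 *
 *	struct cdp_vdev *vdev =
 *		ol_txrx_get_vdev_from_vdev_id(evt_vdev_id);
 *
 *	if (!vdev)
 *		return;		interface already deleted
 *
 * The lookup is a linear walk of pdev->vdev_list, which is cheap for
 * the handful of vdevs a pdev carries.
 */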

/**
 * ol_txrx_get_mon_vdev_from_pdev() - get monitor mode vdev from pdev
 * @ppdev: the physical device the virtual device belongs to
 *
 * Return: vdev handle
 *         NULL if not found.
 */
struct cdp_vdev *ol_txrx_get_mon_vdev_from_pdev(struct cdp_pdev *ppdev)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if (qdf_unlikely(!pdev))
		return NULL;

	return (struct cdp_vdev *)pdev->monitor_vdev;
}

/**
 * ol_txrx_set_wisa_mode() - set wisa mode
 * @pvdev: vdev handle
 * @enable: enable flag
 *
 * Return: QDF STATUS
 */
static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_vdev *pvdev, bool enable)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	if (!vdev)
		return QDF_STATUS_E_INVAL;

	vdev->is_wisa_mode_enable = enable;
	return QDF_STATUS_SUCCESS;
}

/**
 * ol_txrx_get_vdev_id() - get interface id from interface context
 * @pvdev: vdev handle
 *
 * Return: virtual interface id
 */
static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	return vdev->vdev_id;
}

/**
 * ol_txrx_soc_attach_target() - attach soc target
 * @soc: soc handle
 *
 * MCL legacy OL does nothing here.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
{
	/* MCL legacy OL does nothing here */
	return QDF_STATUS_SUCCESS;
}

/**
 * ol_txrx_soc_detach() - detach soc target
 * @soc: soc handle
 *
 * Free the soc allocated in ol_txrx_soc_attach().
 *
 * Return: none
 */
static void ol_txrx_soc_detach(void *soc)
{
	qdf_mem_free(soc);
}

/**
 * ol_txrx_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Return: none
 */
#ifdef REMOVE_PKT_LOG
static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
}
#else
static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	htt_pkt_log_init((struct cdp_pdev *)pdev, scn);
	pktlog_htc_attach();
}
#endif

/* OL wrapper functions for CDP abstraction */
/**
 * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
 * @peer: peer handle
 * @drop: rx packets drop or deliver
 *
 * Return: none
 */
static void ol_txrx_wrapper_flush_rx_frames(void *peer, bool drop)
{
	ol_txrx_flush_rx_frames((ol_txrx_peer_handle)peer, drop);
}

/**
 * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
 * @ppdev: pdev handle
 * @vdev_id: interface id
 *
 * Return: virtual interface instance
 */
static
struct cdp_vdev *ol_txrx_wrapper_get_vdev_from_vdev_id(struct cdp_pdev *ppdev,
						       uint8_t vdev_id)
{
	return ol_txrx_get_vdev_from_vdev_id(vdev_id);
}

/**
 * ol_txrx_wrapper_register_peer() - register peer
 * @pdev: pdev handle
 * @sta_desc: peer description
 *
 * Return: QDF STATUS
 */
static QDF_STATUS ol_txrx_wrapper_register_peer(struct cdp_pdev *pdev,
					struct ol_txrx_desc_type *sta_desc)
{
	return ol_txrx_register_peer(sta_desc);
}

/**
 * ol_txrx_wrapper_peer_find_by_local_id() - Find a txrx peer handle
 * @pdev: the data physical device object
 * @local_peer_id: the ID txrx assigned locally to the peer in question
 *
 * The control SW typically uses the txrx peer handle to refer to the peer.
 * In unusual circumstances, if it is infeasible for the control SW to
 * maintain the txrx peer handle but it can maintain a small integer local
 * peer ID, this function allows the peer handle to be retrieved, based on
 * the local peer ID.
 *
 * Return: handle to the txrx peer object
 */
static void *
ol_txrx_wrapper_peer_find_by_local_id(struct cdp_pdev *pdev,
				      uint8_t local_peer_id)
{
	return (void *)ol_txrx_peer_find_by_local_id(
		pdev, local_peer_id);
}

/**
 * ol_txrx_wrapper_cfg_is_high_latency() - device is high or low latency device
 * @cfg_pdev: cfg pdev handle
 *
 * Return: 1 high latency bus
 *         0 low latency bus
 */
static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
{
	return ol_cfg_is_high_latency(cfg_pdev);
}

/**
 * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
 * @pdev: pdev handle
 * @peer_mac: MAC address of the peer that changed state
 * @state: the new state of the peer
 *
 * Specify the peer's authentication state (none, connected, authenticated)
 * to allow the data SW to determine whether to filter out invalid data frames.
 * (In the "connected" state, where security is enabled but authentication
 * has not completed, tx and rx data frames other than EAPOL or WAPI should
 * be discarded.)
 * This function is only relevant for systems in which the tx and rx filtering
 * are done in the host rather than in the target.
 *
 * Return: QDF Status
 */
static QDF_STATUS ol_txrx_wrapper_peer_state_update(struct cdp_pdev *pdev,
		uint8_t *peer_mac, enum ol_txrx_peer_state state)
{
	return ol_txrx_peer_state_update(pdev,
					 peer_mac, state);
}
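
/*
 * Example (illustrative sketch): the usual state progression for a
 * secured association as driven by the control path; "peer_mac" is
 * the peer's MAC address bytes.
 *
 *	assoc completes, keys not yet plumbed:
 *	ol_txrx_wrapper_peer_state_update(pdev, peer_mac,
 *					  OL_TXRX_PEER_STATE_CONN);
 *
 *	EAPOL/WAPI handshake completes:
 *	ol_txrx_wrapper_peer_state_update(pdev, peer_mac,
 *					  OL_TXRX_PEER_STATE_AUTH);
 *
 * For open security the control path typically moves straight to
 * OL_TXRX_PEER_STATE_AUTH.
 */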

/**
 * ol_txrx_wrapper_find_peer_by_addr() - find peer instance by address
 * @pdev: pdev handle
 * @peer_addr: peer address we want to find
 * @peer_id: peer id
 *
 * Return: peer instance pointer
 */
static void *ol_txrx_wrapper_find_peer_by_addr(struct cdp_pdev *pdev,
					       uint8_t *peer_addr,
					       uint8_t *peer_id)
{
	return ol_txrx_find_peer_by_addr(pdev,
					 peer_addr, peer_id);
}

/**
 * ol_txrx_wrapper_peer_get_ref_by_addr() - get peer reference by address
 * @pdev: pdev handle
 * @peer_addr: peer address we want to find
 * @peer_id: peer id
 * @debug_id: peer debug id for tracking
 *
 * Return: peer instance pointer
 */
static void *
ol_txrx_wrapper_peer_get_ref_by_addr(struct cdp_pdev *pdev,
				     u8 *peer_addr, uint8_t *peer_id,
				     enum peer_debug_id_type debug_id)
{
	return ol_txrx_peer_get_ref_by_addr((ol_txrx_pdev_handle)pdev,
					    peer_addr, peer_id, debug_id);
}

/**
 * ol_txrx_wrapper_peer_release_ref() - release peer reference
 * @peer: peer handle
 * @debug_id: peer debug id for tracking
 *
 * Release peer ref acquired by peer get ref api
 *
 * Return: void
 */
static void ol_txrx_wrapper_peer_release_ref(void *peer,
					     enum peer_debug_id_type debug_id)
{
	ol_txrx_peer_release_ref(peer, debug_id);
}
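
/*
 * Example (illustrative sketch; PEER_DEBUG_ID_OL_INTERNAL is used as
 * a plausible debug id): every successful peer_get_ref_by_addr() must
 * be paired with a peer_release_ref() under the same debug id, or the
 * peer object can never be freed.
 *
 *	uint8_t peer_id;
 *	void *peer;
 *
 *	peer = ol_txrx_wrapper_peer_get_ref_by_addr(pdev, mac, &peer_id,
 *					PEER_DEBUG_ID_OL_INTERNAL);
 *	if (!peer)
 *		return;
 *	use the peer; it cannot be deleted while the ref is held ...
 *	ol_txrx_wrapper_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 */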
5428
/**
 * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
 * @cfg_pdev: cfg context
 * @cfg_param: cfg parameters
 *
 * Return: none
 */
static void
ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
					    void *cfg_param)
{
	return ol_tx_set_flow_control_parameters(
		cfg_pdev,
		(struct txrx_pdev_cfg_param_t *)cfg_param);
}
5444
/**
 * ol_txrx_get_cfg() - get ini/cfg values in legacy dp
 * @soc: soc context
 * @cfg: the cfg item to query
 *
 * Return: the configured value, 0 for unknown items
 */
static uint32_t ol_txrx_get_cfg(void *soc, enum cdp_dp_cfg cfg)
{
	struct txrx_pdev_cfg_t *cfg_ctx;
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	uint32_t value = 0;

	/* Defensive NULL check; pdev can be absent during teardown */
	if (qdf_unlikely(!pdev))
		return 0;

	cfg_ctx = (struct txrx_pdev_cfg_t *)(pdev->ctrl_pdev);
	switch (cfg) {
	case cfg_dp_enable_data_stall:
		value = cfg_ctx->enable_data_stall_detection;
		break;
	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
		value = cfg_ctx->ip_tcp_udp_checksum_offload;
		break;
	case cfg_dp_tso_enable:
		value = cfg_ctx->tso_enable;
		break;
	case cfg_dp_lro_enable:
		value = cfg_ctx->lro_enable;
		break;
	case cfg_dp_gro_enable:
		value = cfg_ctx->gro_enable;
		break;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	case cfg_dp_tx_flow_start_queue_offset:
		value = cfg_ctx->tx_flow_start_queue_offset;
		break;
	case cfg_dp_tx_flow_stop_queue_threshold:
		value = cfg_ctx->tx_flow_stop_queue_th;
		break;
#endif
	case cfg_dp_ipa_uc_tx_buf_size:
		value = cfg_ctx->uc_tx_buffer_size;
		break;
	case cfg_dp_ipa_uc_tx_partition_base:
		value = cfg_ctx->uc_tx_partition_base;
		break;
	case cfg_dp_ipa_uc_rx_ind_ring_count:
		value = cfg_ctx->uc_rx_indication_ring_count;
		break;
	case cfg_dp_enable_flow_steering:
		value = cfg_ctx->enable_flow_steering;
		break;
	case cfg_dp_reorder_offload_supported:
		value = cfg_ctx->is_full_reorder_offload;
		break;
	case cfg_dp_ce_classify_enable:
		value = cfg_ctx->ce_classify_enabled;
		break;
	case cfg_dp_disable_intra_bss_fwd:
		value = cfg_ctx->disable_intra_bss_fwd;
		break;
	default:
		value = 0;
		break;
	}

	return value;
}
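
/*
 * Example (illustrative sketch): the CDP layer reads ini-derived
 * settings through this getter instead of touching the legacy cfg
 * structures directly, e.g.:
 *
 *	if (ol_txrx_get_cfg(soc, cfg_dp_gro_enable))
 *		enable the GRO receive path ...
 *
 *	uint32_t tso_enabled = ol_txrx_get_cfg(soc, cfg_dp_tso_enable);
 *
 * Items not handled by the switch above simply read back as 0.
 */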
5511
#ifdef WDI_EVENT_ENABLE
void *ol_get_pldev(struct cdp_pdev *txrx_pdev)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)txrx_pdev;

	if (pdev)
		return pdev->pl_dev;

	return NULL;
}
#endif
/**
 * ol_register_packetdump_callback() - registers
 * tx data packet, tx mgmt. packet and rx data packet
 * dump callback handler.
 *
 * @ol_tx_packetdump_cb: tx packetdump cb
 * @ol_rx_packetdump_cb: rx packetdump cb
 *
 * This function is used to register the tx data pkt, tx mgmt.
 * pkt and rx data pkt dump callbacks.
 *
 * Return: None
 *
 */
static inline
void ol_register_packetdump_callback(ol_txrx_pktdump_cb ol_tx_packetdump_cb,
				     ol_txrx_pktdump_cb ol_rx_packetdump_cb)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	pdev->ol_tx_packetdump_cb = ol_tx_packetdump_cb;
	pdev->ol_rx_packetdump_cb = ol_rx_packetdump_cb;
}

/**
 * ol_deregister_packetdump_callback() - deregisters
 * tx data packet, tx mgmt. packet and rx data packet
 * dump callback handler
 *
 * This function is used to deregister the tx data pkt,
 * tx mgmt. pkt and rx data pkt dump callbacks.
 *
 * Return: None
 *
 */
static inline
void ol_deregister_packetdump_callback(void)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	pdev->ol_tx_packetdump_cb = NULL;
	pdev->ol_rx_packetdump_cb = NULL;
}
5577
static struct cdp_cmn_ops ol_ops_cmn = {
	.txrx_soc_attach_target = ol_txrx_soc_attach_target,
	.txrx_vdev_attach = ol_txrx_vdev_attach,
	.txrx_vdev_detach = ol_txrx_vdev_detach,
	.txrx_pdev_attach = ol_txrx_pdev_attach,
	.txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
	.txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
	.txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
	.txrx_pdev_detach = ol_txrx_pdev_detach,
	.txrx_peer_create = ol_txrx_peer_attach,
	.txrx_peer_setup = NULL,
	.txrx_peer_teardown = NULL,
	.txrx_peer_delete = ol_txrx_peer_detach,
	.txrx_peer_delete_sync = ol_txrx_peer_detach_sync,
	.txrx_vdev_register = ol_txrx_vdev_register,
	.txrx_soc_detach = ol_txrx_soc_detach,
	.txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
	.txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
	.txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
	.txrx_get_mon_vdev_from_pdev = ol_txrx_get_mon_vdev_from_pdev,
	.txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
	.txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
	.txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
	.txrx_peer_unmap_sync_cb_set = ol_txrx_peer_unmap_sync_cb_set,
	.txrx_get_tx_pending = ol_txrx_get_tx_pending,
	.flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
	.txrx_fw_stats_get = ol_txrx_fw_stats_get,
	.display_stats = ol_txrx_display_stats,
	.txrx_get_cfg = ol_txrx_get_cfg,
	/* TODO: Add other functions */
};

static struct cdp_misc_ops ol_ops_misc = {
	.set_ibss_vdev_heart_beat_timer =
		ol_txrx_set_ibss_vdev_heart_beat_timer,
#ifdef CONFIG_HL_SUPPORT
	.set_wmm_param = ol_txrx_set_wmm_param,
#endif /* CONFIG_HL_SUPPORT */
	.bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
	.bad_peer_txctl_update_threshold =
		ol_txrx_bad_peer_txctl_update_threshold,
	.hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
	.tx_non_std = ol_tx_non_std,
	.get_vdev_id = ol_txrx_get_vdev_id,
	.get_tx_ack_stats = ol_txrx_get_tx_ack_stats,
	.set_wisa_mode = ol_txrx_set_wisa_mode,
	.txrx_data_stall_cb_register = ol_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = ol_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = ol_txrx_post_data_stall_event,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = ol_txrx_runtime_suspend,
	.runtime_resume = ol_txrx_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.get_opmode = ol_txrx_get_opmode,
	.mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
	.update_mac_id = ol_txrx_update_mac_id,
	.flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
	.get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
	.pkt_log_init = htt_pkt_log_init,
	.pkt_log_con_service = ol_txrx_pkt_log_con_service,
	.register_pktdump_cb = ol_register_packetdump_callback,
	.unregister_pktdump_cb = ol_deregister_packetdump_callback
};

static struct cdp_flowctl_ops ol_ops_flowctl = {
	.register_pause_cb = ol_txrx_register_pause_cb,
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
	.dump_flow_pool_info = ol_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = ol_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};

#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
	.register_tx_flow_control = ol_txrx_register_tx_flow_control,
	.deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
	.flow_control_cb = ol_txrx_flow_control_cb,
	.get_tx_resource = ol_txrx_get_tx_resource,
	.ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
	.vdev_flush = ol_txrx_vdev_flush,
	.vdev_pause = ol_txrx_vdev_pause,
	.vdev_unpause = ol_txrx_vdev_unpause
}; /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
#elif defined(QCA_HL_NETDEV_FLOW_CONTROL)
static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
	.register_tx_flow_control = ol_txrx_register_hl_flow_control,
	.vdev_flush = ol_txrx_vdev_flush,
	.vdev_pause = ol_txrx_vdev_pause,
	.vdev_unpause = ol_txrx_vdev_unpause,
	.set_vdev_os_queue_status = ol_txrx_set_vdev_os_queue_status,
	.set_vdev_tx_desc_limit = ol_txrx_set_vdev_tx_desc_limit
};
#else /* QCA_HL_NETDEV_FLOW_CONTROL */
static struct cdp_lflowctl_ops ol_ops_l_flowctl = { };
#endif

#ifdef IPA_OFFLOAD
static struct cdp_ipa_ops ol_ops_ipa = {
	.ipa_get_resource = ol_txrx_ipa_uc_get_resource,
	.ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
	.ipa_set_active = ol_txrx_ipa_uc_set_active,
	.ipa_op_response = ol_txrx_ipa_uc_op_response,
	.ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
	.ipa_get_stat = ol_txrx_ipa_uc_get_stat,
	.ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
	.ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base,
	.ipa_enable_autonomy = ol_txrx_ipa_enable_autonomy,
	.ipa_disable_autonomy = ol_txrx_ipa_disable_autonomy,
	.ipa_setup = ol_txrx_ipa_setup,
	.ipa_cleanup = ol_txrx_ipa_cleanup,
	.ipa_setup_iface = ol_txrx_ipa_setup_iface,
	.ipa_cleanup_iface = ol_txrx_ipa_cleanup_iface,
	.ipa_enable_pipes = ol_txrx_ipa_enable_pipes,
	.ipa_disable_pipes = ol_txrx_ipa_disable_pipes,
	.ipa_set_perf_level = ol_txrx_ipa_set_perf_level,
#ifdef FEATURE_METERING
	.ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
	.ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota
#endif
};
#endif

#ifdef RECEIVE_OFFLOAD
static struct cdp_rx_offld_ops ol_rx_offld_ops = {
	.register_rx_offld_flush_cb = ol_register_offld_flush_cb,
	.deregister_rx_offld_flush_cb = ol_deregister_offld_flush_cb
};
#endif

static struct cdp_bus_ops ol_ops_bus = {
	.bus_suspend = ol_txrx_bus_suspend,
	.bus_resume = ol_txrx_bus_resume
};

#ifdef WLAN_FEATURE_DSRC
static struct cdp_ocb_ops ol_ops_ocb = {
	.set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
	.get_ocb_chan_info = ol_txrx_get_ocb_chan_info
};
#endif

static struct cdp_throttle_ops ol_ops_throttle = {
#ifdef QCA_SUPPORT_TX_THROTTLE
	.throttle_init_period = ol_tx_throttle_init_period,
	.throttle_set_level = ol_tx_throttle_set_level
#endif /* QCA_SUPPORT_TX_THROTTLE */
};

static struct cdp_mob_stats_ops ol_ops_mob_stats = {
	.clear_stats = ol_txrx_clear_stats,
	.stats = ol_txrx_stats
};

static struct cdp_cfg_ops ol_ops_cfg = {
	.set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
	.set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
	.cfg_attach = ol_pdev_cfg_attach,
	.vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
	.is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
	.tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
	.is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
	.set_flow_control_parameters =
		ol_txrx_wrapper_set_flow_control_parameters,
	.set_flow_steering = ol_set_cfg_flow_steering,
	.set_ptp_rx_opt_enabled = ol_set_cfg_ptp_rx_opt_enabled,
	.set_new_htt_msg_format =
		ol_txrx_set_new_htt_msg_format,
	.set_peer_unmap_conf_support = ol_txrx_set_peer_unmap_conf_support,
	.get_peer_unmap_conf_support = ol_txrx_get_peer_unmap_conf_support,
	.set_tx_compl_tsf64 = ol_txrx_set_tx_compl_tsf64,
	.get_tx_compl_tsf64 = ol_txrx_get_tx_compl_tsf64,
};

static struct cdp_peer_ops ol_ops_peer = {
	.register_peer = ol_txrx_wrapper_register_peer,
	.clear_peer = ol_txrx_clear_peer,
	.peer_get_ref_by_addr = ol_txrx_wrapper_peer_get_ref_by_addr,
	.peer_release_ref = ol_txrx_wrapper_peer_release_ref,
	.find_peer_by_addr = ol_txrx_wrapper_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = ol_txrx_find_peer_by_addr_and_vdev,
	.local_peer_id = ol_txrx_local_peer_id,
	.peer_find_by_local_id = ol_txrx_wrapper_peer_find_by_local_id,
	.peer_state_update = ol_txrx_wrapper_peer_state_update,
	.get_vdevid = ol_txrx_get_vdevid,
	.get_vdev_by_sta_id = ol_txrx_get_vdev_by_sta_id,
	.register_ocb_peer = ol_txrx_register_ocb_peer,
	.peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
	.get_peer_state = ol_txrx_get_peer_state,
	.get_vdev_for_peer = ol_txrx_get_vdev_for_peer,
	.update_ibss_add_peer_num_of_vdev =
		ol_txrx_update_ibss_add_peer_num_of_vdev,
	.remove_peers_for_vdev = ol_txrx_remove_peers_for_vdev,
	.remove_peers_for_vdev_no_lock = ol_txrx_remove_peers_for_vdev_no_lock,
#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
	.copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
	.add_last_real_peer = ol_txrx_add_last_real_peer,
	.is_vdev_restore_last_peer = is_vdev_restore_last_peer,
	.update_last_real_peer = ol_txrx_update_last_real_peer,
#endif /* CONFIG_HL_SUPPORT && FEATURE_WLAN_TDLS */
	.peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
};

static struct cdp_tx_delay_ops ol_ops_delay = {
#ifdef QCA_COMPUTE_TX_DELAY
	.tx_delay = ol_tx_delay,
	.tx_delay_hist = ol_tx_delay_hist,
	.tx_packet_count = ol_tx_packet_count,
	.tx_set_compute_interval = ol_tx_set_compute_interval
#endif /* QCA_COMPUTE_TX_DELAY */
};

static struct cdp_pmf_ops ol_ops_pmf = {
	.get_pn_info = ol_txrx_get_pn_info
};

static struct cdp_ctrl_ops ol_ops_ctrl = {
	.txrx_get_pldev = ol_get_pldev,
	.txrx_wdi_event_sub = wdi_event_sub,
	.txrx_wdi_event_unsub = wdi_event_unsub,
};
5799
/* WIN platform-specific structures */
static struct cdp_me_ops ol_ops_me = {
	/* EMPTY FOR MCL */
};

static struct cdp_mon_ops ol_ops_mon = {
	/* EMPTY FOR MCL */
};

static struct cdp_host_stats_ops ol_ops_host_stats = {
	/* EMPTY FOR MCL */
};

static struct cdp_wds_ops ol_ops_wds = {
	/* EMPTY FOR MCL */
};

static struct cdp_raw_ops ol_ops_raw = {
	/* EMPTY FOR MCL */
};
5820
static struct cdp_ops ol_txrx_ops = {
	.cmn_drv_ops = &ol_ops_cmn,
	.ctrl_ops = &ol_ops_ctrl,
	.me_ops = &ol_ops_me,
	.mon_ops = &ol_ops_mon,
	.host_stats_ops = &ol_ops_host_stats,
	.wds_ops = &ol_ops_wds,
	.raw_ops = &ol_ops_raw,
	.misc_ops = &ol_ops_misc,
	.cfg_ops = &ol_ops_cfg,
	.flowctl_ops = &ol_ops_flowctl,
	.l_flowctl_ops = &ol_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &ol_ops_ipa,
#endif
#ifdef RECEIVE_OFFLOAD
	.rx_offld_ops = &ol_rx_offld_ops,
#endif
	.bus_ops = &ol_ops_bus,
#ifdef WLAN_FEATURE_DSRC
	.ocb_ops = &ol_ops_ocb,
#endif
	.peer_ops = &ol_ops_peer,
	.throttle_ops = &ol_ops_throttle,
	.mob_stats_ops = &ol_ops_mob_stats,
	.delay_ops = &ol_ops_delay,
	.pmf_ops = &ol_ops_pmf
};

/*
 * Local prototype added to temporarily address warning caused by
 * -Wmissing-prototypes. A more correct solution, namely to expose
 * a prototype in an appropriate header file, will come later.
 */
struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
				     struct ol_if_ops *dp_ol_if_ops);
struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
				     struct ol_if_ops *dp_ol_if_ops)
{
	struct cdp_soc_t *soc = qdf_mem_malloc(sizeof(struct cdp_soc_t));

	if (!soc)
		return NULL;

	soc->ops = &ol_txrx_ops;
	return soc;
}
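
/*
 * Example (illustrative sketch; "scn", "ppdev" and "vdev_id" are
 * hypothetical inputs, and NULL is passed for the ol_if_ops only for
 * brevity): after attach, every datapath call is dispatched through
 * the ops tables wired up above.
 *
 *	struct cdp_soc_t *soc = ol_txrx_soc_attach(scn, NULL);
 *
 *	if (soc) {
 *		struct cdp_vdev *vdev =
 *			soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id(
 *							ppdev, vdev_id);
 *		...
 *	}
 *
 * Teardown goes through soc->ops->cmn_drv_ops->txrx_soc_detach(soc),
 * which frees the allocation made here.
 */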

bool ol_txrx_get_new_htt_msg_format(struct ol_txrx_pdev_t *pdev)
{
	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return false;
	}
	return pdev->new_htt_msg_format;
}

void ol_txrx_set_new_htt_msg_format(uint8_t val)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->new_htt_msg_format = val;
}

bool ol_txrx_get_peer_unmap_conf_support(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return false;
	}
	return pdev->enable_peer_unmap_conf_support;
}

void ol_txrx_set_peer_unmap_conf_support(bool val)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->enable_peer_unmap_conf_support = val;
}

#ifdef WLAN_FEATURE_TSF_PLUS
bool ol_txrx_get_tx_compl_tsf64(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return false;
	}
	return pdev->enable_tx_compl_tsf64;
}

void ol_txrx_set_tx_compl_tsf64(bool val)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->enable_tx_compl_tsf64 = val;
}
#else
bool ol_txrx_get_tx_compl_tsf64(void)
{
	return false;
}

void ol_txrx_set_tx_compl_tsf64(bool val)
{
}
#endif