/*
 * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>              /* uint32_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc,free */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <qdf_debugfs.h>

/* header files for utilities */
#include <cds_queue.h>          /* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_if_athvar.h>

/* header files for HTT API */
#include <ol_htt_api.h>
#include <ol_htt_tx_api.h>

/* header files for our own APIs */
#include <ol_txrx_api.h>
#include <ol_txrx_dbg.h>
#include <cdp_txrx_ocb.h>
#include <ol_txrx_ctrl_api.h>
#include <cdp_txrx_stats.h>
#include <ol_txrx_osif_api.h>
/* header files for our internal definitions */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT, etc. */
#include <wdi_event.h>          /* WDI events */
#include <ol_tx.h>              /* ol_tx_ll */
#include <ol_rx.h>              /* ol_rx_deliver */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
#include <ol_rx_pn.h>           /* ol_rx_pn_check, etc. */
#include <ol_rx_fwd.h>          /* ol_rx_fwd_check, etc. */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
#include <ol_rx_reorder.h>
#include <ol_tx_send.h>         /* ol_tx_discard_target_frms */
#include <ol_tx_desc.h>         /* ol_tx_desc_frame_free */
#include <ol_tx_queue.h>
#include <ol_tx_sched.h>        /* ol_tx_sched_attach, etc. */
#include <ol_txrx.h>
#include <ol_txrx_types.h>
#include <ol_cfg.h>
#include <cdp_txrx_flow_ctrl_legacy.h>
#include <cdp_txrx_cmn_reg.h>
#include <cdp_txrx_bus.h>
#include <cdp_txrx_ipa.h>
#include <cdp_txrx_pmf.h>
#include "wma.h"
#include "hif.h"
#include "hif_main.h"
#include <cdp_txrx_peer_ops.h>
#ifndef REMOVE_PKT_LOG
#include "pktlog_ac.h"
#endif
#include <wlan_policy_mgr_api.h>
#include "epping_main.h"
#include <a_types.h>
#include <cdp_txrx_handle.h>
#include "wlan_qct_sys.h"

#include <htt_internal.h>
#include <ol_txrx_ipa.h>
#include "wlan_roam_debug.h"
#include "cfg_ucfg_api.h"


#define DPT_DEBUGFS_PERMS	(QDF_FILE_USR_READ |	\
				 QDF_FILE_USR_WRITE |	\
				 QDF_FILE_GRP_READ |	\
				 QDF_FILE_OTH_READ)

#define DPT_DEBUGFS_NUMBER_BASE	10
/**
 * enum dpt_set_param_debugfs - dpt set params
 * @DPT_SET_PARAM_PROTO_BITMAP: set proto bitmap
 * @DPT_SET_PARAM_NR_RECORDS: set num of records
 * @DPT_SET_PARAM_VERBOSITY: set verbosity
 * @DPT_SET_PARAM_NUM_RECORDS_TO_DUMP: set num of records to dump
 * @DPT_SET_PARAM_MAX: number of set params plus one
 */
enum dpt_set_param_debugfs {
	DPT_SET_PARAM_PROTO_BITMAP = 1,
	DPT_SET_PARAM_NR_RECORDS = 2,
	DPT_SET_PARAM_VERBOSITY = 3,
	DPT_SET_PARAM_NUM_RECORDS_TO_DUMP = 4,
	DPT_SET_PARAM_MAX,
};

QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
				     uint8_t *peer_mac,
				     enum ol_txrx_peer_state state);
static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *vdev,
					bool val);
int ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
extern void
ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
		      struct ol_tx_wmm_param_t wmm_param);

extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
				uint64_t **last_pn, uint32_t **rmf_pn_replays);

/* thresh for peer's cached buf queue beyond which the elements are dropped */
#define OL_TXRX_CACHED_BUFQ_THRESH 128

Himanshu Agarwal19141bb2016-07-20 20:15:48 +0530123/**
124 * ol_tx_mark_first_wakeup_packet() - set flag to indicate that
125 * fw is compatible for marking first packet after wow wakeup
126 * @value: 1 for enabled/ 0 for disabled
127 *
128 * Return: None
129 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800130static void ol_tx_mark_first_wakeup_packet(uint8_t value)
Himanshu Agarwal19141bb2016-07-20 20:15:48 +0530131{
132 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
133
134 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +0530135 ol_txrx_err("pdev is NULL");
Himanshu Agarwal19141bb2016-07-20 20:15:48 +0530136 return;
137 }
138
139 htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
140}
141
Nirav Shah22bf44d2015-12-10 15:39:48 +0530142/**
143 * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate that mgmt over
144 * wmi is enabled or not.
145 * @value: 1 for enabled/ 0 for disable
146 *
147 * Return: None
148 */
149void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
150{
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +0530151 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
152 ol_txrx_pdev_handle pdev;
Yun Parkeaea8632017-04-09 09:53:45 -0700153
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +0530154 if (qdf_unlikely(!soc)) {
155 ol_txrx_err("soc is NULL");
Nirav Shah22bf44d2015-12-10 15:39:48 +0530156 return;
157 }
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +0530158
159 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
160 if (!pdev) {
161 ol_txrx_err("pdev is NULL");
162 return;
163 }
164
Nirav Shah22bf44d2015-12-10 15:39:48 +0530165 pdev->is_mgmt_over_wmi_enabled = value;
Nirav Shah22bf44d2015-12-10 15:39:48 +0530166}

/**
 * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
 *
 * Return: is_mgmt_over_wmi_enabled
 */
uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc)) {
		ol_txrx_err("soc is NULL");
		return 0;
	}

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return 0;
	}

	return pdev->is_mgmt_over_wmi_enabled;
}

#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
static void *
ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
		struct cdp_vdev *pvdev, uint8_t *peer_addr, uint8_t *peer_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	return peer;
}

static QDF_STATUS ol_txrx_get_vdevid(void *ppeer, uint8_t *vdev_id)
{
	struct ol_txrx_peer_t *peer = ppeer;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "peer argument is null!!");
		return QDF_STATUS_E_FAILURE;
	}

	*vdev_id = peer->vdev->vdev_id;
	return QDF_STATUS_SUCCESS;
}

ol_txrx_vdev_handle
ol_txrx_get_vdev_by_peer_addr(struct cdp_pdev *ppdev,
			      struct qdf_mac_addr peer_addr)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_peer_t *peer = NULL;
	ol_txrx_vdev_handle vdev;
	/* peer_id to be removed PEER_ID_CLEANUP */
	uint8_t peer_id;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PDEV not found for peer_addr: " QDF_MAC_ADDR_STR,
			  QDF_MAC_ADDR_ARRAY(peer_addr.bytes));
		return NULL;
	}

	peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr.bytes, &peer_id,
					    PEER_DEBUG_ID_OL_INTERNAL);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Peer not found for peer_addr: " QDF_MAC_ADDR_STR,
			  QDF_MAC_ADDR_ARRAY(peer_addr.bytes));
		return NULL;
	}

	vdev = peer->vdev;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);

	return vdev;
}

/**
 * ol_txrx_wrapper_get_vdev_by_peer_addr() - Get vdev handle by peer mac address
 * @ppdev - data path device instance
 * @peer_addr - peer mac address
 *
 * Get virtual interface handle by local peer mac address
 *
 * Return: Virtual interface instance handle
 *         NULL in case cannot find
 */
static struct cdp_vdev *
ol_txrx_wrapper_get_vdev_by_peer_addr(struct cdp_pdev *ppdev,
				      struct qdf_mac_addr peer_addr)
{
	return (struct cdp_vdev *)ol_txrx_get_vdev_by_peer_addr(ppdev,
								peer_addr);
}

/**
 * ol_txrx_find_peer_by_addr() - find peer via peer mac addr and peer_id
 * @ppdev: pointer of type cdp_pdev
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds a peer with the given mac address and returns its
 * peer_id. Note that this function does not increment the peer->ref_cnt.
 * This means that the peer may be deleted in some other parallel context
 * after it has been found.
 *
 * Return: peer handle if peer is found, NULL if peer is not found.
 */
void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
				uint8_t *peer_addr,
				uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
						   PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	return peer;
}

/**
 * ol_txrx_peer_get_ref_by_addr() - get peer ref via peer mac addr and peer_id
 * @pdev: pointer of type ol_txrx_pdev_handle
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds the peer with given mac address and returns its peer_id.
 * Note that this function increments the peer->ref_cnt.
 * This makes sure that peer will be valid. This also means the caller needs to
 * call the corresponding API - ol_txrx_peer_release_ref to delete the peer
 * reference.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr, peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
						 u8 *peer_addr,
						 u8 *peer_id,
						 enum peer_debug_id_type dbg_id)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
						   dbg_id);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	return peer;
}

static uint16_t ol_txrx_local_peer_id(void *ppeer)
{
	ol_txrx_peer_handle peer = ppeer;

	return peer->local_id;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @return handle to the txrx peer object
 */
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *ppdev,
			      uint8_t local_peer_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_peer_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	return peer;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @dbg_id - debug_id to track caller
 * @return handle to the txrx peer object
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * Note that this function increments the peer->ref_cnt.
 * This makes sure that peer will be valid. This also means the caller needs
 * to call the corresponding API - ol_txrx_peer_release_ref - to release the
 * peer reference.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_local_id(pdev, local_peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
				 uint8_t local_peer_id,
				 enum peer_debug_id_type dbg_id)
{
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_peer_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	if (peer && peer->valid)
		ol_txrx_peer_get_ref(peer, dbg_id);
	else
		peer = NULL;
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);

	return peer;
}

static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}
	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
}

#else
#define ol_txrx_local_peer_id_pool_init(pdev)   /* no-op */
#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
#define ol_txrx_local_peer_id_free(pdev, peer)  /* no-op */
#define ol_txrx_local_peer_id_cleanup(pdev)     /* no-op */
#endif

#if defined(CONFIG_DP_TRACE) && defined(WLAN_DEBUGFS)
/**
 * ol_txrx_read_dpt_buff_debugfs() - read dp trace buffer
 * @file: file to read
 * @arg: pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_read_dpt_buff_debugfs(qdf_debugfs_file_t file,
						void *arg)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)arg;
	uint32_t i = 0;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID)
		return QDF_STATUS_E_INVAL;
	else if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE) {
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
		return QDF_STATUS_SUCCESS;
	}

	i = qdf_dpt_get_curr_pos_debugfs(file, pdev->state);
	status = qdf_dpt_dump_stats_debugfs(file, i);
	if (status == QDF_STATUS_E_FAILURE)
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS;
	else if (status == QDF_STATUS_SUCCESS)
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE;

	return status;
}

/**
 * ol_txrx_conv_str_to_int_debugfs() - convert string to int
 * @buf: buffer containing string
 * @len: buffer len
 * @proto_bitmap: defines the protocol to be tracked
 * @nr_records: defines the nth packet which is traced
 * @verbosity: defines the verbosity level
 * @num_records_to_dump: defines the number of records to be dumped
 *
 * This function expects the char buffer to be null terminated.
 * Otherwise the results may be unexpected.
 *
 * Return: 0 on success
 */
static int ol_txrx_conv_str_to_int_debugfs(char *buf, qdf_size_t len,
					   int *proto_bitmap,
					   int *nr_records,
					   int *verbosity,
					   int *num_records_to_dump)
{
	int num_value = DPT_SET_PARAM_PROTO_BITMAP;
	int ret, param_value = 0;
	char *buf_param = buf;
	int i;

	for (i = 1; i < DPT_SET_PARAM_MAX; i++) {
		/* Loop until a space is reached, since kstrtoint operates
		 * up to the null character. Replace the space with a null
		 * character so each value can be read.
		 * Terminate the loop at the null character or when len
		 * reaches 0.
		 */
		while (*buf && len) {
			if (*buf == ' ') {
				*buf = '\0';
				buf++;
				len--;
				break;
			}
			buf++;
			len--;
		}
		/* get the parameter */
		ret = qdf_kstrtoint(buf_param,
				    DPT_DEBUGFS_NUMBER_BASE,
				    &param_value);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s: Error while parsing buffer. ret %d",
				  __func__, ret);
			return ret;
		}
		switch (num_value) {
		case DPT_SET_PARAM_PROTO_BITMAP:
			*proto_bitmap = param_value;
			break;
		case DPT_SET_PARAM_NR_RECORDS:
			*nr_records = param_value;
			break;
		case DPT_SET_PARAM_VERBOSITY:
			*verbosity = param_value;
			break;
		case DPT_SET_PARAM_NUM_RECORDS_TO_DUMP:
			if (param_value > MAX_QDF_DP_TRACE_RECORDS)
				param_value = MAX_QDF_DP_TRACE_RECORDS;
			*num_records_to_dump = param_value;
			break;
		default:
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s %d: Set command needs exactly 4 arguments in format <proto_bitmap> <number of record> <Verbosity> <number of records to dump>.",
				  __func__, __LINE__);
			break;
		}
		num_value++;
		/* buf_param should now point to the next param value. */
		buf_param = buf;
	}

	/* *buf not yet being NUL implies more than 4 params were passed. */
	if (*buf) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d: Set command needs exactly 4 arguments in format <proto_bitmap> <number of record> <Verbosity> <number of records to dump>.",
			  __func__, __LINE__);
		return -EINVAL;
	}
	return 0;
}

/**
 * ol_txrx_write_dpt_buff_debugfs() - set dp trace parameters
 * @priv: pdev object
 * @buf: buff to get value for dpt parameters
 * @len: buf length
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_write_dpt_buff_debugfs(void *priv,
						 const char *buf,
						 qdf_size_t len)
{
	int ret;
	int proto_bitmap = 0;
	int nr_records = 0;
	int verbosity = 0;
	int num_records_to_dump = 0;
	char *buf1 = NULL;

	if (!buf || !len) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: null buffer or len. len %u",
			  __func__, (uint8_t)len);
		return QDF_STATUS_E_FAULT;
	}

	buf1 = (char *)qdf_mem_malloc(len);
	if (!buf1)
		return QDF_STATUS_E_FAULT;

	qdf_mem_copy(buf1, buf, len);
	ret = ol_txrx_conv_str_to_int_debugfs(buf1, len, &proto_bitmap,
					      &nr_records, &verbosity,
					      &num_records_to_dump);
	if (ret) {
		qdf_mem_free(buf1);
		return QDF_STATUS_E_INVAL;
	}

	qdf_dpt_set_value_debugfs(proto_bitmap, nr_records, verbosity,
				  num_records_to_dump);
	qdf_mem_free(buf1);
	return QDF_STATUS_SUCCESS;
}
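
/*
 * Usage note (illustrative, not taken from the original sources): the
 * "dump_set_dpt_logs" debugfs file registered below is parsed by
 * ol_txrx_conv_str_to_int_debugfs(), which expects exactly four base-10
 * integers separated by single spaces:
 *     <proto_bitmap> <nr_records> <verbosity> <num_records_to_dump>
 * e.g. (parameter values are hypothetical):
 *     echo "8 1 3 100" > <debugfs mount>/dpt_stats/dump_set_dpt_logs
 * The debugfs mount point and meaningful parameter values depend on the
 * platform and on the qdf dp_trace configuration.
 */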

static int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
	pdev->dpt_debugfs_fops.show = ol_txrx_read_dpt_buff_debugfs;
	pdev->dpt_debugfs_fops.write = ol_txrx_write_dpt_buff_debugfs;
	pdev->dpt_debugfs_fops.priv = pdev;

	pdev->dpt_stats_log_dir = qdf_debugfs_create_dir("dpt_stats", NULL);

	if (!pdev->dpt_stats_log_dir) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: error while creating debugfs dir for %s",
			  __func__, "dpt_stats");
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
		return -EBUSY;
	}

	if (!qdf_debugfs_create_file("dump_set_dpt_logs", DPT_DEBUGFS_PERMS,
				     pdev->dpt_stats_log_dir,
				     &pdev->dpt_debugfs_fops)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: debug Entry creation failed!",
			  __func__);
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
		return -EBUSY;
	}

	pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
	return 0;
}

static void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
	qdf_debugfs_remove_dir_recursive(pdev->dpt_stats_log_dir);
}
#else
static inline int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
	return 0;
}

static inline void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
}
#endif

/**
 * ol_txrx_pdev_attach() - allocate txrx pdev
 * @soc: soc handle
 * @ctrl_pdev: cfg pdev
 * @htc_pdev: HTC pdev
 * @osdev: os dev
 * @pdev_id: pdev identifier for pdev attach
 *
 * Return: txrx pdev handle
 *         NULL for failure
 */
static struct cdp_pdev *
ol_txrx_pdev_attach(ol_txrx_soc_handle soc,
		    struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
		    HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
{
	struct ol_txrx_soc_t *ol_soc = cdp_soc_t_to_ol_txrx_soc_t(soc);
	struct ol_txrx_pdev_t *pdev;
	struct cdp_cfg *cfg_pdev = (struct cdp_cfg *)ctrl_pdev;
	int i, tid;

	if (pdev_id == OL_TXRX_INVALID_PDEV_ID)
		return NULL;

	pdev = qdf_mem_malloc(sizeof(*pdev));
	if (!pdev)
		goto fail0;

	/* init LL/HL cfg here */
	pdev->cfg.is_high_latency = ol_cfg_is_high_latency(cfg_pdev);
	/*
	 * Credit reporting through HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND
	 * enabled or not.
	 */
	pdev->cfg.credit_update_enabled =
		ol_cfg_is_credit_update_enabled(cfg_pdev);

	/* Explicitly request TX Completions from FW */
	pdev->cfg.request_tx_comp = cds_is_ptp_rx_opt_enabled() ||
		cds_is_packet_log_enabled();

	pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(cfg_pdev);

	/* store provided params */
	pdev->ctrl_pdev = cfg_pdev;
	pdev->osdev = osdev;
	pdev->id = pdev_id;
	pdev->soc = ol_soc;
	ol_soc->pdev_list[pdev_id] = pdev;

	for (i = 0; i < htt_num_sec_types; i++)
		pdev->sec_types[i] = (enum ol_sec_type)i;

	TXRX_STATS_INIT(pdev);
	ol_txrx_tso_stats_init(pdev);
	ol_txrx_fw_stats_desc_pool_init(pdev, FW_STATS_DESC_POOL_SIZE);

	TAILQ_INIT(&pdev->vdev_list);

	TAILQ_INIT(&pdev->req_list);
	pdev->req_list_depth = 0;
	qdf_spinlock_create(&pdev->req_list_spinlock);
	qdf_spinlock_create(&pdev->tx_mutex);

	/* do initial set up of the peer ID -> peer object lookup map */
	if (ol_txrx_peer_find_attach(pdev))
		goto fail1;

	/* initialize the counter of the target's tx buffer availability */
	qdf_atomic_init(&pdev->target_tx_credit);
	qdf_atomic_init(&pdev->orig_target_tx_credit);
	qdf_atomic_init(&pdev->pad_reserve_tx_credit);
	qdf_atomic_add(1, &pdev->pad_reserve_tx_credit);

	if (ol_cfg_is_high_latency(cfg_pdev)) {
		qdf_spinlock_create(&pdev->tx_queue_spinlock);
		pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
		if (!pdev->tx_sched.scheduler)
			goto fail2;
	}
	ol_txrx_pdev_txq_log_init(pdev);
	ol_txrx_pdev_grp_stats_init(pdev);

	pdev->htt_pdev =
		htt_pdev_alloc(pdev, cfg_pdev, htc_pdev, osdev);
	if (!pdev->htt_pdev)
		goto fail3;

	htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
					  ol_rx_pkt_dump_call);

	/*
	 * Init the tid --> category table.
	 * Regular tids (0-15) map to their AC.
	 * Extension tids get their own categories.
	 */
	for (tid = 0; tid < OL_TX_NUM_QOS_TIDS; tid++) {
		int ac = TXRX_TID_TO_WMM_AC(tid);

		pdev->tid_to_ac[tid] = ac;
	}
	pdev->tid_to_ac[OL_TX_NON_QOS_TID] =
		OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;
	pdev->tid_to_ac[OL_TX_MGMT_TID] =
		OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
	pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST] =
		OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
	pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
		OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;

	if (ol_cfg_is_flow_steering_enabled(pdev->ctrl_pdev))
		pdev->peer_id_unmap_ref_cnt =
			TXRX_RFS_ENABLE_PEER_ID_UNMAP_COUNT;
	else
		pdev->peer_id_unmap_ref_cnt =
			TXRX_RFS_DISABLE_PEER_ID_UNMAP_COUNT;

	ol_txrx_debugfs_init(pdev);

	return (struct cdp_pdev *)pdev;

fail3:
	ol_txrx_peer_find_detach(pdev);

fail2:
	if (ol_cfg_is_high_latency(cfg_pdev))
		qdf_spinlock_destroy(&pdev->tx_queue_spinlock);

fail1:
	qdf_spinlock_destroy(&pdev->tx_mutex);
	ol_txrx_tso_stats_deinit(pdev);
	ol_txrx_fw_stats_desc_pool_deinit(pdev);
	qdf_mem_free(pdev);

fail0:
	return NULL;
}

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
/**
 * htt_pkt_log_init() - API to initialize packet log
 * @handle: pdev handle
 * @scn: HIF context
 *
 * Return: void
 */
void htt_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
	struct ol_txrx_pdev_t *handle = (struct ol_txrx_pdev_t *)ppdev;

	if (handle->pkt_log_init)
		return;

	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
	    !QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
		pktlog_sethandle(&handle->pl_dev, scn);
		pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
		if (pktlogmod_init(scn))
			qdf_print(" pktlogmod_init failed");
		else
			handle->pkt_log_init = true;
	}
}

/**
 * htt_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 * @scn: HIF Context
 *
 * Return: void
 */
static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
{
	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
	    !QDF_IS_EPPING_ENABLED(cds_get_conparam()) &&
	    handle->pkt_log_init) {
		pktlogmod_exit(handle);
		handle->pkt_log_init = false;
	}
}

#else
void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *ol_sc) { }
static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
#endif

#ifdef QCA_LL_PDEV_TX_FLOW_CONTROL
/**
 * ol_txrx_pdev_set_threshold() - set pdev pool stop/start threshold
 * @pdev: txrx pdev
 *
 * Return: void
 */
static void ol_txrx_pdev_set_threshold(struct ol_txrx_pdev_t *pdev)
{
	uint32_t stop_threshold;
	uint32_t start_threshold;
	uint16_t desc_pool_size = pdev->tx_desc.pool_size;

919 start_threshold = stop_threshold +
920 ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
921 pdev->tx_desc.start_th = (start_threshold * desc_pool_size) / 100;
922 pdev->tx_desc.stop_th = (stop_threshold * desc_pool_size) / 100;
923 pdev->tx_desc.stop_priority_th =
924 (TX_PRIORITY_TH * pdev->tx_desc.stop_th) / 100;
925 if (pdev->tx_desc.stop_priority_th >= MAX_TSO_SEGMENT_DESC)
926 pdev->tx_desc.stop_priority_th -= MAX_TSO_SEGMENT_DESC;
927
928 pdev->tx_desc.start_priority_th =
929 (TX_PRIORITY_TH * pdev->tx_desc.start_th) / 100;
930 if (pdev->tx_desc.start_priority_th >= MAX_TSO_SEGMENT_DESC)
931 pdev->tx_desc.start_priority_th -= MAX_TSO_SEGMENT_DESC;
932 pdev->tx_desc.status = FLOW_POOL_ACTIVE_UNPAUSED;
933}
934#else
935static inline void ol_txrx_pdev_set_threshold(struct ol_txrx_pdev_t *pdev)
936{
937}
938#endif
939
/**
 * ol_txrx_pdev_post_attach() - attach txrx pdev
 * @pdev: txrx pdev
 *
 * Return: 0 for success
 */
int
ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	uint16_t i;
	uint16_t fail_idx = 0;
	int ret = 0;
	uint16_t desc_pool_size;
	struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);

	uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
	union ol_tx_desc_list_elem_t *c_element;
	unsigned int sig_bit;
	uint16_t desc_per_page;

	if (!osc) {
		ret = -EINVAL;
		goto ol_attach_fail;
	}

	/*
	 * For LL, limit the number of host's tx descriptors to match
	 * the number of target FW tx descriptors.
	 * This simplifies the FW, by ensuring the host will never
	 * download more tx descriptors than the target has space for.
	 * The FW will drop/free low-priority tx descriptors when it
	 * starts to run low, so that in theory the host should never
	 * run out of tx descriptors.
	 */

	/*
	 * LL - initialize the target credit ourselves.
	 * HL - wait for a HTT target credit initialization
	 * during htt_attach.
	 */
	desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
	ol_tx_init_pdev(pdev);

	ol_tx_desc_dup_detect_init(pdev, desc_pool_size);

	ol_tx_setup_fastpath_ce_handles(osc, pdev);

	if ((ol_txrx_get_new_htt_msg_format(pdev)))
		ol_set_cfg_new_htt_format(pdev->ctrl_pdev, true);
	else
		ol_set_cfg_new_htt_format(pdev->ctrl_pdev, false);

	ret = htt_attach(pdev->htt_pdev, desc_pool_size);
	if (ret)
		goto htt_attach_fail;

	/* Attach micro controller data path offload resource */
	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
		ret = htt_ipa_uc_attach(pdev->htt_pdev);
		if (ret)
			goto uc_attach_fail;
	}

	/* Calculate single element reserved size power of 2 */
	pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
		pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
	if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
	    (!pdev->tx_desc.desc_pages.cacheable_pages)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Page alloc fail");
		ret = -ENOMEM;
		goto page_alloc_fail;
	}
	desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
	pdev->tx_desc.offset_filter = desc_per_page - 1;
	/* Calculate page divider to find page number */
	sig_bit = 0;
	while (desc_per_page) {
		sig_bit++;
		desc_per_page = desc_per_page >> 1;
	}
	pdev->tx_desc.page_divider = (sig_bit - 1);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
		  pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
		  desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
		  pdev->tx_desc.desc_pages.num_element_per_page);

	/*
	 * Each SW tx desc (used only within the tx datapath SW) has a
	 * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
	 * Go ahead and allocate the HTT tx desc and link it with the SW tx
	 * desc now, to avoid doing it during time-critical transmit.
	 */
	pdev->tx_desc.pool_size = desc_pool_size;
	pdev->tx_desc.freelist =
		(union ol_tx_desc_list_elem_t *)
		(*pdev->tx_desc.desc_pages.cacheable_pages);
	c_element = pdev->tx_desc.freelist;
	for (i = 0; i < desc_pool_size; i++) {
		void *htt_tx_desc;
		void *htt_frag_desc = NULL;
		qdf_dma_addr_t frag_paddr = 0;
		qdf_dma_addr_t paddr;

		if (i == (desc_pool_size - 1))
			c_element->next = NULL;
		else
			c_element->next = (union ol_tx_desc_list_elem_t *)
				ol_tx_desc_find(pdev, i + 1);

		htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
		if (!htt_tx_desc) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
				  "%s: failed to alloc HTT tx desc (%d of %d)",
				  __func__, i, desc_pool_size);
			fail_idx = i;
			ret = -ENOMEM;
			goto desc_alloc_fail;
		}

		c_element->tx_desc.htt_tx_desc = htt_tx_desc;
		c_element->tx_desc.htt_tx_desc_paddr = paddr;
		ret = htt_tx_frag_alloc(pdev->htt_pdev,
					i, &frag_paddr, &htt_frag_desc);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s: failed to alloc HTT frag dsc (%d/%d)",
				  __func__, i, desc_pool_size);
			/* Is there a leak here, is this handling correct? */
			fail_idx = i;
			goto desc_alloc_fail;
		}
		if (!ret && htt_frag_desc) {
			/*
			 * Initialize the first 6 words (TSO flags)
			 * of the frag descriptor
			 */
			memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
			c_element->tx_desc.htt_frag_desc = htt_frag_desc;
			c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
		}
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
		c_element->tx_desc.pkt_type = 0xff;
#ifdef QCA_COMPUTE_TX_DELAY
		c_element->tx_desc.entry_timestamp_ticks =
			0xffffffff;
#endif
#endif
		c_element->tx_desc.id = i;
		qdf_atomic_init(&c_element->tx_desc.ref_cnt);
		c_element = c_element->next;
		fail_idx = i;
	}

	/* link SW tx descs into a freelist */
	pdev->tx_desc.num_free = desc_pool_size;
	ol_txrx_dbg("first tx_desc:0x%pK Last tx desc:0x%pK",
		    (uint32_t *)pdev->tx_desc.freelist,
		    (uint32_t *)(pdev->tx_desc.freelist + desc_pool_size));

	ol_txrx_pdev_set_threshold(pdev);

	/* check what format of frames are expected to be delivered by the OS */
	pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
	if (pdev->frame_format == wlan_frm_fmt_native_wifi)
		pdev->htt_pkt_type = htt_pkt_type_native_wifi;
	else if (pdev->frame_format == wlan_frm_fmt_802_3) {
		if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
			pdev->htt_pkt_type = htt_pkt_type_eth2;
		else
			pdev->htt_pkt_type = htt_pkt_type_ethernet;
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s Invalid standard frame type: %d",
			  __func__, pdev->frame_format);
		ret = -EINVAL;
		goto control_init_fail;
	}

	/* setup the global rx defrag waitlist */
	TAILQ_INIT(&pdev->rx.defrag.waitlist);

	/* configure where defrag timeout and duplicate detection is handled */
	pdev->rx.flags.defrag_timeout_check =
		pdev->rx.flags.dup_check =
		ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* Need to revisit this part. Currently, hardcoded to Riva's caps */
	pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
	pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
	/*
	 * The Riva HW de-aggregate doesn't have capability to generate 802.11
	 * header for non-first subframe of A-MSDU.
	 */
	pdev->sw_subfrm_hdr_recovery_enable = 1;
	/*
	 * The Riva HW doesn't have the capability to set Protected Frame bit
	 * in the MAC header for encrypted data frame.
	 */
	pdev->sw_pf_proc_enable = 1;

	if (pdev->frame_format == wlan_frm_fmt_802_3) {
		/*
		 * sw llc process is only needed in
		 * 802.3 to 802.11 transform case
		 */
		pdev->sw_tx_llc_proc_enable = 1;
		pdev->sw_rx_llc_proc_enable = 1;
	} else {
		pdev->sw_tx_llc_proc_enable = 0;
		pdev->sw_rx_llc_proc_enable = 0;
	}

	switch (pdev->frame_format) {
	case wlan_frm_fmt_raw:
		pdev->sw_tx_encap =
			pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
			? 0 : 1;
		pdev->sw_rx_decap =
			pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
			? 0 : 1;
		break;
	case wlan_frm_fmt_native_wifi:
		pdev->sw_tx_encap =
			pdev->
			target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
			? 0 : 1;
		pdev->sw_rx_decap =
			pdev->
			target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
			? 0 : 1;
		break;
	case wlan_frm_fmt_802_3:
		pdev->sw_tx_encap =
			pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
			? 0 : 1;
		pdev->sw_rx_decap =
			pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
			? 0 : 1;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
			  pdev->frame_format,
			  pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
		ret = -EINVAL;
		goto control_init_fail;
	}
#endif

	/*
	 * Determine what rx processing steps are done within the host.
	 * Possibilities:
	 * 1.  Nothing - rx->tx forwarding and rx PN entirely within target.
	 *     (This is unlikely; even if the target is doing rx->tx forwarding,
	 *     the host should be doing rx->tx forwarding too, as a back up for
	 *     the target's rx->tx forwarding, in case the target runs short on
	 *     memory, and can't store rx->tx frames that are waiting for
	 *     missing prior rx frames to arrive.)
	 * 2.  Just rx -> tx forwarding.
	 *     This is the typical configuration for HL, and a likely
	 *     configuration for LL STA or small APs (e.g. retail APs).
	 * 3.  Both PN check and rx -> tx forwarding.
	 *     This is the typical configuration for large LL APs.
	 * Host-side PN check without rx->tx forwarding is not a valid
	 * configuration, since the PN check needs to be done prior to
	 * the rx->tx forwarding.
	 */
	if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
Yun Parkeaea8632017-04-09 09:53:45 -07001213 /*
1214 * PN check, rx-tx forwarding and rx reorder is done by
1215 * the target
1216 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001217 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
1218 pdev->rx_opt_proc = ol_rx_in_order_deliver;
1219 else
1220 pdev->rx_opt_proc = ol_rx_fwd_check;
1221 } else {
1222 if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
1223 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
1224 /*
1225 * PN check done on host,
1226 * rx->tx forwarding not done at all.
1227 */
1228 pdev->rx_opt_proc = ol_rx_pn_check_only;
1229 } else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1230 /*
1231 * Both PN check and rx->tx forwarding done
1232 * on host.
1233 */
1234 pdev->rx_opt_proc = ol_rx_pn_check;
1235 } else {
1236#define TRACESTR01 "invalid config: if rx PN check is on the host,"\
1237" rx->tx forwarding check needs to also be on the host"
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301238 QDF_TRACE(QDF_MODULE_ID_TXRX,
1239 QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001240 "%s: %s", __func__, TRACESTR01);
1241#undef TRACESTR01
Yun Parkf01f6e22017-01-18 17:27:02 -08001242 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001243 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001244 }
1245 } else {
1246 /* PN check done on target */
1247 if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
1248 ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1249 /*
1250 * rx->tx forwarding done on host (possibly as
1251 * back-up for target-side primary rx->tx
1252 * forwarding)
1253 */
1254 pdev->rx_opt_proc = ol_rx_fwd_check;
1255 } else {
Yun Parkeaea8632017-04-09 09:53:45 -07001256 /*
1257 * rx->tx forwarding either done in target,
1258 * or not done at all
1259 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001260 pdev->rx_opt_proc = ol_rx_deliver;
1261 }
1262 }
1263 }
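	/*
	 * Summary of the rx_opt_proc selection above:
	 *   ol_rx_in_order_deliver - full reorder offload, host rx->tx fwd off
	 *   ol_rx_fwd_check        - host rx->tx forwarding (PN check in the
	 *                            target, or full reorder offload)
	 *   ol_rx_pn_check_only    - host PN check, no rx->tx forwarding
	 *   ol_rx_pn_check         - host PN check plus host rx->tx forwarding
	 *   ol_rx_deliver          - PN check and any rx->tx fwd in the target
	 */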
1264
1265 /* initialize mutexes for tx desc alloc and peer lookup */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301266 qdf_spinlock_create(&pdev->peer_ref_mutex);
1267 qdf_spinlock_create(&pdev->rx.mutex);
1268 qdf_spinlock_create(&pdev->last_real_peer_mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001269 qdf_spinlock_create(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001270 OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
1271
Yun Parkf01f6e22017-01-18 17:27:02 -08001272 if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK) {
1273 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001274 goto reorder_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001275 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001276
Yun Parkf01f6e22017-01-18 17:27:02 -08001277 if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK) {
1278 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001279 goto pn_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001280 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001281
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001282 /*
1283 * WDI event attach
1284 */
1285 wdi_event_attach(pdev);
1286
1287 /*
1288 * Initialize rx PN check characteristics for different security types.
1289 */
hangtian127c9532019-01-12 13:29:07 +08001290 qdf_mem_zero(&pdev->rx_pn[0], sizeof(pdev->rx_pn));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001291
1292 /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
1293 pdev->rx_pn[htt_sec_type_tkip].len =
1294 pdev->rx_pn[htt_sec_type_tkip_nomic].len =
1295 pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
1296 pdev->rx_pn[htt_sec_type_tkip].cmp =
1297 pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
1298 pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
1299
1300 /* WAPI: 128-bit PN */
1301 pdev->rx_pn[htt_sec_type_wapi].len = 128;
1302 pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
1303
1304 OL_RX_REORDER_TIMEOUT_INIT(pdev);
1305
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001306 ol_txrx_dbg("Created pdev %pK\n", pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001307
1308 pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
1309
1310#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
1311#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
1312
1313/* #if 1 -- TODO: clean this up */
1314#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
1315 /* avg = 100% * new + 0% * old */ \
1316 (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
1317/*
Yun Parkeaea8632017-04-09 09:53:45 -07001318 * #else
1319 * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
1320 * //avg = 25% * new + 25% * old
1321 * (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
1322 * #endif
1323 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001324 pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
1325 pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
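	/*
	 * Note (illustrative only, not taken from this code): a typical
	 * shift-based weighted average would be
	 *   avg = (new * rssi_new_weight +
	 *          old * ((1 << rssi_update_shift) - rssi_new_weight))
	 *         >> rssi_update_shift
	 * With the defaults above the new-sample weight equals (1 << shift),
	 * so the average collapses to the latest sample. The actual update
	 * is performed in the rx path, not here.
	 */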
1326#endif
1327
1328 ol_txrx_local_peer_id_pool_init(pdev);
1329
1330 pdev->cfg.ll_pause_txq_limit =
1331 ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
1332
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301333 /* TX flow control for peer who is in very bad link status */
1334 ol_tx_badpeer_flow_cl_init(pdev);
1335
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001336#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301337 qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301338 qdf_spinlock_create(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001339
1340 /* initialize compute interval with 5 seconds (ESE default) */
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301341 pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001342 {
1343 uint32_t bin_width_1000ticks;
Yun Parkeaea8632017-04-09 09:53:45 -07001344
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001345 bin_width_1000ticks =
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301346 qdf_system_msecs_to_ticks
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001347 (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
1348 * 1000);
1349 /*
1350 * Compute a factor and shift that together are equal to the
1351 * inverse of the bin_width time, so that rather than dividing
1352 * by the bin width time, approximately the same result can be
1353 * obtained much more efficiently by a multiply + shift.
1354 * multiply_factor >> shift = 1 / bin_width_time, so
1355 * multiply_factor = (1 << shift) / bin_width_time.
1356 *
1357 * Pick the shift semi-arbitrarily.
1358 * If we knew statically what the bin_width would be, we could
1359 * choose a shift that minimizes the error.
1360 * Since the bin_width is determined dynamically, simply use a
1361 * shift that is about half of the uint32_t size. This should
1362 * result in a relatively large multiplier value, which
1363 * minimizes error from rounding the multiplier to an integer.
1364 * The rounding error only becomes significant if the tick units
1365 * are on the order of 1 microsecond. In most systems, it is
1366 * expected that the tick units will be relatively low-res,
1367 * on the order of 1 millisecond. In such systems the rounding
1368 * error is negligible.
1369 * It would be more accurate to dynamically try out different
1370 * shifts and choose the one that results in the smallest
1371 * rounding error, but that extra level of fidelity is
1372 * not needed.
1373 */
1374 pdev->tx_delay.hist_internal_bin_width_shift = 16;
1375 pdev->tx_delay.hist_internal_bin_width_mult =
1376 ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
1377 1000 + (bin_width_1000ticks >> 1)) /
1378 bin_width_1000ticks;
1379 }
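	/*
	 * Worked example (illustrative numbers, not taken from this code):
	 * with 1 ms ticks and a 10 ms internal bin width,
	 * bin_width_1000ticks = 10000, so with shift = 16
	 *   mult = ((1 << 16) * 1000 + 5000) / 10000 = 6554
	 * and (delta_ticks * 6554) >> 16 is within 0.01% of delta_ticks / 10,
	 * i.e. the histogram bin index.
	 */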
1380#endif /* QCA_COMPUTE_TX_DELAY */
1381
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001382 /* Thermal Mitigation */
1383 ol_tx_throttle_init(pdev);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001384
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001385 ol_tso_seg_list_init(pdev, desc_pool_size);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001386
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301387 ol_tso_num_seg_list_init(pdev, desc_pool_size);
1388
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001389 ol_tx_register_flow_control(pdev);
1390
1391 return 0; /* success */
1392
Leo Chang376398b2015-10-23 14:19:02 -07001393pn_trace_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001394 OL_RX_REORDER_TRACE_DETACH(pdev);
1395
Leo Chang376398b2015-10-23 14:19:02 -07001396reorder_trace_attach_fail:
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301397 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1398 qdf_spinlock_destroy(&pdev->rx.mutex);
1399 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301400 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001401 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1402
Leo Chang376398b2015-10-23 14:19:02 -07001403control_init_fail:
1404desc_alloc_fail:
1405 for (i = 0; i < fail_idx; i++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001406 htt_tx_desc_free(pdev->htt_pdev,
Leo Chang376398b2015-10-23 14:19:02 -07001407 (ol_tx_desc_find(pdev, i))->htt_tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001408
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301409 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001410 &pdev->tx_desc.desc_pages, 0, true);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001411
Leo Chang376398b2015-10-23 14:19:02 -07001412page_alloc_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001413 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1414 htt_ipa_uc_detach(pdev->htt_pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001415uc_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001416 htt_detach(pdev->htt_pdev);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301417htt_attach_fail:
1418 ol_tx_desc_dup_detect_deinit(pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001419ol_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001420 return ret; /* fail */
1421}
1422
Dhanashri Atre12a08392016-02-17 13:10:34 -08001423/**
1424 * ol_txrx_pdev_attach_target() - send target configuration
1425 *
1426 * @ppdev - the physical device being initialized
1427 *
1428 * The majority of the data SW setup is done by the pdev_attach
1429 * functions, but this function completes the data SW setup by
1430 * sending datapath configuration messages to the target.
1431 *
1432 * Return: 0 - success, 1 - failure
1433 */
Rajeev Kumar Sirasanagandlaed4d1b32019-01-29 11:08:20 -08001434static int ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001435{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001436 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001437
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +05301438 return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0:1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001439}
1440
Dhanashri Atre12a08392016-02-17 13:10:34 -08001441/**
Mohit Khanna54f3a382017-03-13 17:56:32 -07001442 * ol_tx_free_descs_inuse - free tx descriptors which are in use
1443 * @pdev - the physical device for which tx descs need to be freed
1444 *
1445 * Cycle through the list of TX descriptors (for a pdev) which are in use,
1446 * for which TX completion has not been received and free them. Should be
1447 * called only when the interrupts are off and all lower layer RX is stopped.
1448 * Otherwise there may be a race condition with TX completions.
1449 *
1450 * Return: None
1451 */
1452static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
1453{
1454 int i;
1455 void *htt_tx_desc;
1456 struct ol_tx_desc_t *tx_desc;
1457 int num_freed_tx_desc = 0;
1458
1459 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1460 tx_desc = ol_tx_desc_find(pdev, i);
1461 /*
1462 * Confirm that each tx descriptor is "empty", i.e. it has
1463 * no tx frame attached.
1464 * In particular, check that there are no frames that have
1465 * been given to the target to transmit, for which the
1466 * target has never provided a response.
1467 */
1468 if (qdf_atomic_read(&tx_desc->ref_cnt)) {
1469 ol_txrx_dbg("Warning: freeing tx frame (no compltn)");
1470 ol_tx_desc_frame_free_nonstd(pdev,
1471 tx_desc, 1);
1472 num_freed_tx_desc++;
1473 }
1474 htt_tx_desc = tx_desc->htt_tx_desc;
1475 htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
1476 }
1477
1478 if (num_freed_tx_desc)
1479 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1480 "freed %d tx frames for which no resp from target",
1481 num_freed_tx_desc);
1482
1483}
1484
1485/**
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301486 * ol_txrx_pdev_pre_detach() - detach the data SW state
1487 * @ppdev - the data physical device object being removed
1488 * @force - delete the pdev (and its vdevs and peers) even if
1489 * there are outstanding references by the target to the vdevs
1490 * and peers within the pdev
1491 *
1492 * This function is used when the WLAN driver is being removed to
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301493 * detach the host data component within the driver.
Dhanashri Atre12a08392016-02-17 13:10:34 -08001494 *
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301495 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08001496 */
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301497static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001498{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001499 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001500
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001501 /* preconditions */
1502 TXRX_ASSERT2(pdev);
1503
1504 /* check that the pdev has no vdevs allocated */
1505 TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
1506
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001507#ifdef QCA_SUPPORT_TX_THROTTLE
1508 /* Thermal Mitigation */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301509 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
1510 qdf_timer_free(&pdev->tx_throttle.phase_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001511#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301512 qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1513 qdf_timer_free(&pdev->tx_throttle.tx_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001514#endif
1515#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001516
1517 if (force) {
1518 /*
1519 * The assertion above confirms that all vdevs within this pdev
1520 * were detached. However, they may not have actually been
1521 * deleted.
1522 * If the vdev had peers which never received a PEER_UNMAP msg
1523 * from the target, then there are still zombie peer objects,
1524 * and the vdev parents of the zombie peers are also zombies,
1525 * hanging around until their final peer gets deleted.
1526 * Go through the peer hash table and delete any peers left.
1527 * As a side effect, this will complete the deletion of any
1528 * vdevs that are waiting for their peers to finish deletion.
1529 */
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001530 ol_txrx_dbg("Force delete for pdev %pK\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001531 pdev);
1532 ol_txrx_peer_find_hash_erase(pdev);
1533 }
1534
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301535 /* to get flow pool status before freeing descs */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07001536 ol_tx_dump_flow_pool_info((void *)pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07001537 ol_tx_free_descs_inuse(pdev);
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301538 ol_tx_deregister_flow_control(pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07001539
1540 /*
1541	 * ol_tso_seg_list_deinit should happen after ol_tx_free_descs_inuse,
1542	 * since ol_tx_free_descs_inuse accesses the tso seg freelist that is
1543	 * de-initialized in ol_tso_seg_list_deinit
1544 */
1545 ol_tso_seg_list_deinit(pdev);
1546 ol_tso_num_seg_list_deinit(pdev);
1547
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301548 /* Stop the communication between HTT and target at first */
1549 htt_detach_target(pdev->htt_pdev);
1550
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301551 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001552 &pdev->tx_desc.desc_pages, 0, true);
1553 pdev->tx_desc.freelist = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001554
1555 /* Detach micro controller data path offload resource */
1556 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1557 htt_ipa_uc_detach(pdev->htt_pdev);
1558
1559 htt_detach(pdev->htt_pdev);
Nirav Shah76291962016-04-25 10:50:37 +05301560 ol_tx_desc_dup_detect_deinit(pdev);
1561
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301562 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1563 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
1564 qdf_spinlock_destroy(&pdev->rx.mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001565 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001566#ifdef QCA_SUPPORT_TX_THROTTLE
1567 /* Thermal Mitigation */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301568 qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001569#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301570
1571 /* TX flow control for peer who is in very bad link status */
1572 ol_tx_badpeer_flow_cl_deinit(pdev);
1573
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001574 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1575
1576 OL_RX_REORDER_TRACE_DETACH(pdev);
1577 OL_RX_PN_TRACE_DETACH(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301578
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001579 /*
1580 * WDI event detach
1581 */
1582 wdi_event_detach(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301583
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001584 ol_txrx_local_peer_id_cleanup(pdev);
1585
1586#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301587 qdf_spinlock_destroy(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001588#endif
1589}
1590
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301591/**
1592 * ol_txrx_pdev_detach() - delete the data SW state
1593 * @ppdev - the data physical device object being removed
1594 * @force - delete the pdev (and its vdevs and peers) even if
1595 * there are outstanding references by the target to the vdevs
1596 * and peers within the pdev
1597 *
1598 * This function is used when the WLAN driver is being removed to
1599 * remove the host data component within the driver.
1600 * All virtual devices within the physical device need to be deleted
1601 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
1602 *
1603 * Return: None
1604 */
1605static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
1606{
Rakesh Pillaica99b832019-06-24 15:05:13 +05301607 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301608 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Rakesh Pillai33942c42018-05-09 11:45:38 +05301609 struct ol_txrx_stats_req_internal *req, *temp_req;
tfyu9fcabd72017-09-26 17:46:48 +08001610 int i = 0;
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301611
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05301612 if (!soc) {
Rakesh Pillaica99b832019-06-24 15:05:13 +05301613 ol_txrx_err("soc is NULL");
1614 return;
1615 }
1616
1617	/* check to ensure the txrx pdev structure is not NULL */
1618 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05301619 ol_txrx_err("pdev is NULL");
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301620 return;
1621 }
1622
1623 htt_pktlogmod_exit(pdev);
1624
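	/*
	 * Drain any outstanding stats requests: they are logged and freed
	 * below without invoking their completion callbacks.
	 */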
tfyu9fcabd72017-09-26 17:46:48 +08001625 qdf_spin_lock_bh(&pdev->req_list_spinlock);
1626 if (pdev->req_list_depth > 0)
1627 ol_txrx_err(
1628 "Warning: the txrx req list is not empty, depth=%d\n",
1629 pdev->req_list_depth
1630 );
Rakesh Pillai33942c42018-05-09 11:45:38 +05301631 TAILQ_FOREACH_SAFE(req, &pdev->req_list, req_list_elem, temp_req) {
tfyu9fcabd72017-09-26 17:46:48 +08001632 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
1633 pdev->req_list_depth--;
1634 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05301635 "%d: %pK,verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)\n",
tfyu9fcabd72017-09-26 17:46:48 +08001636 i++,
1637 req,
1638 req->base.print.verbose,
1639 req->base.print.concise,
1640 req->base.stats_type_upload_mask,
1641 req->base.stats_type_reset_mask
1642 );
1643 qdf_mem_free(req);
1644 }
1645 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
1646
1647 qdf_spinlock_destroy(&pdev->req_list_spinlock);
Ajit Pal Singh8184e932018-07-25 13:54:13 +05301648 qdf_spinlock_destroy(&pdev->tx_mutex);
tfyu9fcabd72017-09-26 17:46:48 +08001649
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301650 OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
1651
1652 if (pdev->cfg.is_high_latency)
1653 ol_tx_sched_detach(pdev);
1654
1655 htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
1656
1657 htt_pdev_free(pdev->htt_pdev);
1658 ol_txrx_peer_find_detach(pdev);
1659 ol_txrx_tso_stats_deinit(pdev);
jitiphil335d2412018-06-07 22:49:24 +05301660 ol_txrx_fw_stats_desc_pool_deinit(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301661
1662 ol_txrx_pdev_txq_log_destroy(pdev);
1663 ol_txrx_pdev_grp_stat_destroy(pdev);
Alok Kumarddd457e2018-04-09 13:51:42 +05301664
Rakshith Suresh Patkar44f6a8f2018-04-17 16:17:12 +05301665 ol_txrx_debugfs_exit(pdev);
1666
Rakesh Pillaica99b832019-06-24 15:05:13 +05301667 soc->pdev_list[pdev->id] = NULL;
Alok Kumarddd457e2018-04-09 13:51:42 +05301668 qdf_mem_free(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301669}
1670
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301671#if defined(QCA_HL_NETDEV_FLOW_CONTROL)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301672
1673/**
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301674 * ol_txrx_vdev_per_vdev_tx_desc_init() - initialise per vdev tx desc count
1675 * related variables.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301676 * @vdev: the virtual device object
1677 *
1678 * Return: None
1679 */
1680static inline void
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301681ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301682{
1683 qdf_atomic_init(&vdev->tx_desc_count);
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301684 vdev->tx_desc_limit = 0;
1685 vdev->queue_restart_th = 0;
1686 vdev->prio_q_paused = 0;
1687 vdev->queue_stop_th = 0;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301688}
1689#else
1690
1691static inline void
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301692ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301693{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301694}
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301695#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301696
Dhanashri Atre12a08392016-02-17 13:10:34 -08001697/**
1698 * ol_txrx_vdev_attach - Allocate and initialize the data object
1699 * for a new virtual device.
1700 *
1701 * @ppdev - the physical device the virtual device belongs to
1702 * @vdev_mac_addr - the MAC address of the virtual device
1703 * @vdev_id - the ID used to identify the virtual device to the target
1704 * @op_mode - whether this virtual device is operating as an AP,
1705 * an IBSS, or a STA
Rakesh Pillai31d7fb22019-09-18 19:25:49 +05301706 * @subtype: Subtype of the operating vdev
Dhanashri Atre12a08392016-02-17 13:10:34 -08001707 *
1708 * Return: success: handle to new data vdev object, failure: NULL
1709 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001710static struct cdp_vdev *
1711ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001712 uint8_t *vdev_mac_addr,
Rakesh Pillai31d7fb22019-09-18 19:25:49 +05301713 uint8_t vdev_id, enum wlan_op_mode op_mode,
1714 enum wlan_op_subtype subtype)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001715{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001716 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001717 struct ol_txrx_vdev_t *vdev;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001718 QDF_STATUS qdf_status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001719
1720 /* preconditions */
1721 TXRX_ASSERT2(pdev);
1722 TXRX_ASSERT2(vdev_mac_addr);
1723
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301724 vdev = qdf_mem_malloc(sizeof(*vdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001725 if (!vdev)
1726 return NULL; /* failure */
1727
1728 /* store provided params */
1729 vdev->pdev = pdev;
1730 vdev->vdev_id = vdev_id;
1731 vdev->opmode = op_mode;
Rakesh Pillai31d7fb22019-09-18 19:25:49 +05301732 vdev->subtype = subtype;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001733
1734 vdev->delete.pending = 0;
1735 vdev->safemode = 0;
1736 vdev->drop_unenc = 1;
1737 vdev->num_filters = 0;
Himanshu Agarwal5ac2f7b2016-05-06 20:08:10 +05301738 vdev->fwd_tx_packets = 0;
1739 vdev->fwd_rx_packets = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001740
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301741 ol_txrx_vdev_per_vdev_tx_desc_init(vdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301742
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301743 qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
Srinivas Girigowdaa47b45f2019-02-27 12:29:02 -08001744 QDF_MAC_ADDR_SIZE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001745
1746 TAILQ_INIT(&vdev->peer_list);
1747 vdev->last_real_peer = NULL;
1748
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001749 ol_txrx_hl_tdls_flag_reset((struct cdp_vdev *)vdev, false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301750
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001751#ifdef QCA_IBSS_SUPPORT
1752 vdev->ibss_peer_num = 0;
1753 vdev->ibss_peer_heart_beat_timer = 0;
1754#endif
1755
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301756 ol_txrx_vdev_txqs_init(vdev);
1757
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301758 qdf_spinlock_create(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001759 vdev->ll_pause.paused_reason = 0;
1760 vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
1761 vdev->ll_pause.txq.depth = 0;
wadesong5e2e8012017-08-21 16:56:03 +08001762 qdf_atomic_init(&vdev->delete.detaching);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301763 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001764 &vdev->ll_pause.timer,
1765 ol_tx_vdev_ll_pause_queue_send, vdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301766 QDF_TIMER_TYPE_SW);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301767 qdf_atomic_init(&vdev->os_q_paused);
1768 qdf_atomic_set(&vdev->os_q_paused, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001769 vdev->tx_fl_lwm = 0;
1770 vdev->tx_fl_hwm = 0;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001771 vdev->rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001772 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Abhishek Singh217d9782017-04-28 23:49:11 +05301773 qdf_mem_zero(&vdev->last_peer_mac_addr,
1774 sizeof(union ol_txrx_align_mac_addr_t));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301775 qdf_spinlock_create(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001776 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08001777 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001778 vdev->osif_fc_ctx = NULL;
1779
Alok Kumar75355aa2018-03-19 17:32:58 +05301780 vdev->txrx_stats.txack_success = 0;
1781 vdev->txrx_stats.txack_failed = 0;
1782
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001783 /* Default MAX Q depth for every VDEV */
1784 vdev->ll_pause.max_q_depth =
1785 ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001786 qdf_status = qdf_event_create(&vdev->wait_delete_comp);
Tiger Yue40e7832019-04-25 10:46:53 +08001787
1788 ol_txrx_vdev_init_tcp_del_ack(vdev);
1789
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001790 /* add this vdev into the pdev's list */
1791 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
chenguo2201c0a2018-11-15 18:07:41 +08001792 if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam())
1793 pdev->monitor_vdev = vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001794
Poddar, Siddarth14521792017-03-14 21:19:42 +05301795 ol_txrx_dbg(
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07001796 "Created vdev %pK ("QDF_MAC_ADDR_STR")\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001797 vdev,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07001798 QDF_MAC_ADDR_ARRAY(vdev->mac_addr.raw));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001799
1800 /*
1801 * We've verified that htt_op_mode == wlan_op_mode,
1802 * so no translation is needed.
1803 */
1804 htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
1805
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001806 return (struct cdp_vdev *)vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001807}
1808
Dhanashri Atre12a08392016-02-17 13:10:34 -08001809/**
1810 *ol_txrx_vdev_register - Link a vdev's data object with the
1811 * matching OS shim vdev object.
1812 *
1813 * @txrx_vdev: the virtual device's data object
1814 * @osif_vdev: the virtual device's OS shim object
Sravan Kumar Kairam43f191b2018-05-04 17:00:39 +05301815 * @ctrl_vdev: UMAC vdev objmgr handle
Dhanashri Atre12a08392016-02-17 13:10:34 -08001816 * @txrx_ops: (pointers to)functions used for tx and rx data xfer
1817 *
1818 * The data object for a virtual device is created by the
1819 * function ol_txrx_vdev_attach. However, rather than fully
1820 * linking the data vdev object with the vdev objects from the
1821 * other subsystems that the data vdev object interacts with,
1822 * the txrx_vdev_attach function focuses primarily on creating
1823 * the data vdev object. After the creation of both the data
1824 * vdev object and the OS shim vdev object, this
1825 * txrx_osif_vdev_attach function is used to connect the two
1826 * vdev objects, so the data SW can use the OS shim vdev handle
1827 * when passing rx data received by a vdev up to the OS shim.
1828 */
Sravan Kumar Kairam43f191b2018-05-04 17:00:39 +05301829static void ol_txrx_vdev_register(struct cdp_vdev *pvdev, void *osif_vdev,
1830 struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
1831 struct ol_txrx_ops *txrx_ops)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001832{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001833 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001834
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001835 if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05301836 qdf_print("vdev/txrx_ops is NULL!");
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001837 qdf_assert(0);
1838 return;
1839 }
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001840
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001841 vdev->osif_dev = osif_vdev;
Sravan Kumar Kairam43f191b2018-05-04 17:00:39 +05301842 vdev->ctrl_vdev = ctrl_vdev;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001843 vdev->rx = txrx_ops->rx.rx;
Poddar, Siddarth3906e172018-01-09 11:24:58 +05301844 vdev->stats_rx = txrx_ops->rx.stats_rx;
Alok Kumar4696fb02018-06-06 00:10:18 +05301845 vdev->tx_comp = txrx_ops->tx.tx_comp;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001846 txrx_ops->tx.tx = ol_tx_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001847}
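/*
 * Illustrative usage of the attach/register pair (sketch only; the osif
 * device, ctrl vdev handle, callbacks and mode/subtype values shown are
 * assumptions, not defined in this file):
 *
 *	struct ol_txrx_ops ops = { .rx.rx = osif_rx_cb };
 *	struct cdp_vdev *vdev;
 *
 *	vdev = ol_txrx_vdev_attach(ppdev, mac_addr, vdev_id,
 *				   wlan_op_mode_sta, subtype);
 *	ol_txrx_vdev_register(vdev, osif_dev, ctrl_vdev, &ops);
 *	...
 *	ol_txrx_vdev_detach(vdev, delete_cb, delete_ctx);
 */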
1848
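/**
 * ol_txrx_set_safemode() - set the safemode flag for a vdev
 * @vdev: the data virtual device object
 * @val: the new safemode value
 *
 * Return: None
 */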
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001849void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
1850{
1851 vdev->safemode = val;
1852}
1853
Dhanashri Atre12a08392016-02-17 13:10:34 -08001854/**
1855 * ol_txrx_set_privacy_filters - set the privacy filter
1856 * @vdev - the data virtual device object
1857 * @filter - filters to be set
1858 * @num - the number of filters
1859 *
1860 * Rx related. Set the privacy filters. When rx packets, check
1861 * the ether type, filter type and packet type to decide whether
1862 * discard these packets.
1863 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08001864static void
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001865ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
1866 void *filters, uint32_t num)
1867{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301868 qdf_mem_copy(vdev->privacy_filters, filters,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001869 num * sizeof(struct privacy_exemption));
1870 vdev->num_filters = num;
1871}
1872
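/**
 * ol_txrx_set_drop_unenc() - set the drop-unencrypted flag for a vdev
 * @vdev: the data virtual device object
 * @val: non-zero to drop unencrypted frames
 *
 * Return: None
 */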
1873void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
1874{
1875 vdev->drop_unenc = val;
1876}
1877
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001878#if defined(CONFIG_HL_SUPPORT) || defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
gbian016a42e2017-03-01 18:49:11 +08001879
1880static void
1881ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
1882{
1883 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1884 int i;
1885 struct ol_tx_desc_t *tx_desc;
1886
1887 qdf_spin_lock_bh(&pdev->tx_mutex);
1888 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1889 tx_desc = ol_tx_desc_find(pdev, i);
1890 if (tx_desc->vdev == vdev)
1891 tx_desc->vdev = NULL;
1892 }
1893 qdf_spin_unlock_bh(&pdev->tx_mutex);
1894}
1895
1896#else
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001897#ifdef QCA_LL_TX_FLOW_CONTROL_V2
1898static void ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
1899{
1900 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1901 struct ol_tx_flow_pool_t *pool;
1902 int i;
1903 struct ol_tx_desc_t *tx_desc;
gbian016a42e2017-03-01 18:49:11 +08001904
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001905 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
1906 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1907 tx_desc = ol_tx_desc_find(pdev, i);
1908 if (!qdf_atomic_read(&tx_desc->ref_cnt))
1909 /* not in use */
1910 continue;
1911
1912 pool = tx_desc->pool;
1913 qdf_spin_lock_bh(&pool->flow_pool_lock);
1914 if (tx_desc->vdev == vdev)
1915 tx_desc->vdev = NULL;
1916 qdf_spin_unlock_bh(&pool->flow_pool_lock);
1917 }
1918 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
1919}
1920
1921#else
gbian016a42e2017-03-01 18:49:11 +08001922static void
1923ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
1924{
gbian016a42e2017-03-01 18:49:11 +08001925}
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001926#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
1927#endif /* CONFIG_HL_SUPPORT */
gbian016a42e2017-03-01 18:49:11 +08001928
Dhanashri Atre12a08392016-02-17 13:10:34 -08001929/**
1930 * ol_txrx_vdev_detach - Deallocate the specified data virtual
1931 * device object.
1932 * @pvdev: data object for the virtual device in question
1933 * @callback: function to call (if non-NULL) once the vdev has
1934 * been wholly deleted
1935 * @callback_context: context to provide in the callback
1936 *
1937 * All peers associated with the virtual device need to be deleted
1938 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
1939 * However, for the peers to be fully deleted, the peer deletion has to
1940 * percolate through the target data FW and back up to the host data SW.
1941 * Thus, even though the host control SW may have issued a peer_detach
1942 * call for each of the vdev's peers, the peer objects may still be
1943 * allocated, pending removal of all references to them by the target FW.
1944 * In this case, though the vdev_detach function call will still return
1945 * immediately, the vdev itself won't actually be deleted, until the
1946 * deletions of all its peers complete.
1947 * The caller can provide a callback function pointer to be notified when
1948 * the vdev deletion actually happens - whether it's directly within the
1949 * vdev_detach call, or if it's deferred until all in-progress peer
1950 * deletions have completed.
1951 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08001952static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001953ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001954 ol_txrx_vdev_delete_cb callback, void *context)
1955{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001956 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
wadesong5e2e8012017-08-21 16:56:03 +08001957 struct ol_txrx_pdev_t *pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001958
1959 /* preconditions */
1960 TXRX_ASSERT2(vdev);
wadesong5e2e8012017-08-21 16:56:03 +08001961 pdev = vdev->pdev;
1962
1963 /* prevent anyone from restarting the ll_pause timer again */
1964 qdf_atomic_set(&vdev->delete.detaching, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001965
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301966 ol_txrx_vdev_tx_queue_free(vdev);
1967
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301968 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301969 qdf_timer_stop(&vdev->ll_pause.timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001970 vdev->ll_pause.is_q_timer_on = false;
1971 while (vdev->ll_pause.txq.head) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05301972 qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
Yun Parkeaea8632017-04-09 09:53:45 -07001973
Nirav Shahcbc6d722016-03-01 16:24:53 +05301974 qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
Nirav Shahcbc6d722016-03-01 16:24:53 +05301975 qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001976 vdev->ll_pause.txq.head = next;
1977 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301978 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
wadesong5e2e8012017-08-21 16:56:03 +08001979
1980 /* ll_pause timer should be deleted without any locks held, and
1981 * no timer function should be executed after this point because
1982 * qdf_timer_free is deleting the timer synchronously.
1983 */
1984 qdf_timer_free(&vdev->ll_pause.timer);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301985 qdf_spinlock_destroy(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001986
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301987 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001988 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08001989 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001990 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301991 qdf_spin_unlock_bh(&vdev->flow_control_lock);
1992 qdf_spinlock_destroy(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001993
1994 /* remove the vdev from its parent pdev's list */
1995 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
1996
1997 /*
1998 * Use peer_ref_mutex while accessing peer_list, in case
1999 * a peer is in the process of being removed from the list.
2000 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302001 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002002 /* check that the vdev has no peers allocated */
2003 if (!TAILQ_EMPTY(&vdev->peer_list)) {
2004 /* debug print - will be removed later */
Poddar, Siddarth14521792017-03-14 21:19:42 +05302005 ol_txrx_dbg(
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002006 "not deleting vdev object %pK ("QDF_MAC_ADDR_STR") until deletion finishes for all its peers\n",
Nirav Shah7c8c1712018-09-10 16:01:31 +05302007 vdev,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002008 QDF_MAC_ADDR_ARRAY(vdev->mac_addr.raw));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002009 /* indicate that the vdev needs to be deleted */
2010 vdev->delete.pending = 1;
2011 vdev->delete.callback = callback;
2012 vdev->delete.context = context;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302013 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002014 return;
2015 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302016 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002017 qdf_event_destroy(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002018
Poddar, Siddarth14521792017-03-14 21:19:42 +05302019 ol_txrx_dbg(
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002020 "deleting vdev obj %pK ("QDF_MAC_ADDR_STR")\n",
Nirav Shah7c8c1712018-09-10 16:01:31 +05302021 vdev,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002022 QDF_MAC_ADDR_ARRAY(vdev->mac_addr.raw));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002023
2024 htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
2025
2026 /*
2027	 * ol_tx_desc_free might access invalid contents of a vdev referred to
2028	 * by a tx desc, since the vdev might be detached asynchronously in
2029	 * another thread.
2030	 *
2031	 * Go through the tx desc pool and clear the vdev pointer of any desc
2032	 * that belongs to this vdev when detaching it; ol_tx_desc_free checks
2033	 * for a NULL vdev to avoid a crash.
2034 *
2035 */
gbian016a42e2017-03-01 18:49:11 +08002036 ol_txrx_tx_desc_reset_vdev(vdev);
2037
2038 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002039 * Doesn't matter if there are outstanding tx frames -
2040 * they will be freed once the target sends a tx completion
2041 * message for them.
2042 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302043 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002044 if (callback)
2045 callback(context);
2046}
2047
2048/**
2049 * ol_txrx_flush_rx_frames() - flush cached rx frames
2050 * @peer: peer
2051 * @drop: set flag to drop frames
2052 *
2053 * Return: None
2054 */
2055void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302056 bool drop)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002057{
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002058 struct ol_txrx_cached_bufq_t *bufqi;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002059 struct ol_rx_cached_buf *cache_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302060 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002061 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002062
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302063 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
2064 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002065 return;
2066 }
2067
Dhanashri Atre182b0272016-02-17 15:35:07 -08002068 qdf_assert(peer->vdev);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302069 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002070 bufqi = &peer->bufq_info;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002071
Dhanashri Atre50141c52016-04-07 13:15:29 -07002072 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
Dhanashri Atre182b0272016-02-17 15:35:07 -08002073 data_rx = peer->vdev->rx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002074 else
2075 drop = true;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302076 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002077
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002078 qdf_spin_lock_bh(&bufqi->bufq_lock);
2079 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002080 typeof(*cache_buf), list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002081 while (!list_empty(&bufqi->cached_bufq)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002082 list_del(&cache_buf->list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002083 bufqi->curr--;
2084 qdf_assert(bufqi->curr >= 0);
2085 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002086 if (drop) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302087 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002088 } else {
2089 /* Flush the cached frames to HDD */
Dhanashri Atre182b0272016-02-17 15:35:07 -08002090 ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302091 if (ret != QDF_STATUS_SUCCESS)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302092 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002093 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302094 qdf_mem_free(cache_buf);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002095 qdf_spin_lock_bh(&bufqi->bufq_lock);
2096 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002097 typeof(*cache_buf), list);
2098 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002099 bufqi->qdepth_no_thresh = bufqi->curr;
2100 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302101 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002102}
2103
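/**
 * ol_txrx_flush_cache_rx_queue() - flush cached rx frames for all peers
 *
 * Walk every vdev of the pdev and drop any rx frames still cached for
 * each of its peers.
 *
 * Return: None
 */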
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07002104static void ol_txrx_flush_cache_rx_queue(void)
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302105{
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05302106 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302107 struct ol_txrx_peer_t *peer;
Rakshith Suresh Patkar73654d02019-08-01 16:12:22 +05302108 struct ol_txrx_vdev_t *vdev;
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05302109 ol_txrx_pdev_handle pdev;
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302110
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05302111 if (qdf_unlikely(!soc)) {
2112 ol_txrx_err("soc is NULL");
2113 return;
2114 }
2115
2116 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302117 if (!pdev)
2118 return;
2119
Rakshith Suresh Patkar73654d02019-08-01 16:12:22 +05302120 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2121 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2122 ol_txrx_flush_rx_frames(peer, 1);
2123 }
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302124 }
2125}
2126
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302127/* Define short name to use in cds_trigger_recovery */
2128#define PEER_DEL_TIMEOUT QDF_PEER_DELETION_TIMEDOUT
2129
Dhanashri Atre12a08392016-02-17 13:10:34 -08002130/**
Naveen Rawat17c42a82018-02-01 19:18:27 -08002131 * ol_txrx_dump_peer_access_list() - dump peer access list
2132 * @peer: peer handle
2133 *
2134 * This function will dump if any peer debug ids are still accessing peer
2135 *
2136 * Return: None
2137 */
2138static void ol_txrx_dump_peer_access_list(ol_txrx_peer_handle peer)
2139{
2140 u32 i;
2141 u32 pending_ref;
2142
2143 for (i = 0; i < PEER_DEBUG_ID_MAX; i++) {
2144 pending_ref = qdf_atomic_read(&peer->access_list[i]);
2145 if (pending_ref)
2146 ol_txrx_info_high("id %d pending refs %d",
2147 i, pending_ref);
2148 }
2149}
2150
2151/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08002152 * ol_txrx_peer_attach - Allocate and set up references for a
2153 * data peer object.
2154 * @pvdev - data virtual device object that will directly
2155 * own the data_peer object
2156 * @peer_mac_addr - MAC address of the new peer
2157 * @ctrl_peer - UMAC peer objmgr handle
2159 *
2160 * When an association with a peer starts, the host's control SW
2161 * uses this function to inform the host data SW.
2162 * The host data SW allocates its own peer object, and stores a
2163 * reference to the control peer object within the data peer object.
2164 * The host data SW also stores a reference to the virtual device
2165 * that the peer is associated with. This virtual device handle is
2166 * used when the data SW delivers rx data frames to the OS shim layer.
2167 * The host data SW returns a handle to the new peer data object,
2168 * so a reference within the control peer object can be set to the
2169 * data peer object.
2170 *
2171 * Return: handle to new data peer object, or NULL if the attach
2172 * fails
2173 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002174static void *
psimha8696f772018-04-03 17:38:38 -07002175ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr,
Sravan Kumar Kairamc273afd2018-05-28 12:12:28 +05302176 struct cdp_ctrl_objmgr_peer *ctrl_peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002177{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002178 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002179 struct ol_txrx_peer_t *peer;
2180 struct ol_txrx_peer_t *temp_peer;
2181 uint8_t i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002182 bool wait_on_deletion = false;
2183 unsigned long rc;
Dhanashri Atre12a08392016-02-17 13:10:34 -08002184 struct ol_txrx_pdev_t *pdev;
Abhishek Singh217d9782017-04-28 23:49:11 +05302185 bool cmp_wait_mac = false;
2186 uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
Alok Kumare1977442018-11-28 17:16:03 +05302187 u8 check_valid = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002188
2189 /* preconditions */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002190 TXRX_ASSERT2(vdev);
2191 TXRX_ASSERT2(peer_mac_addr);
2192
Dhanashri Atre12a08392016-02-17 13:10:34 -08002193 pdev = vdev->pdev;
2194 TXRX_ASSERT2(pdev);
2195
Alok Kumare1977442018-11-28 17:16:03 +05302196 if (pdev->enable_peer_unmap_conf_support)
2197 check_valid = 1;
2198
Abhishek Singh217d9782017-04-28 23:49:11 +05302199 if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
2200 QDF_MAC_ADDR_SIZE))
2201 cmp_wait_mac = true;
2202
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302203 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002204 /* check for duplicate existing peer */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002205 TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2206 if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
Alok Kumare1977442018-11-28 17:16:03 +05302207 (union ol_txrx_align_mac_addr_t *)peer_mac_addr) &&
2208 (check_valid == 0 || temp_peer->valid)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302209 ol_txrx_info_high(
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002210 "vdev_id %d ("QDF_MAC_ADDR_STR") already exists.\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002211 vdev->vdev_id,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002212 QDF_MAC_ADDR_ARRAY(peer_mac_addr));
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302213 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002214 vdev->wait_on_peer_id = temp_peer->local_id;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002215 qdf_event_reset(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002216 wait_on_deletion = true;
Abhishek Singh217d9782017-04-28 23:49:11 +05302217 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002218 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302219 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002220 return NULL;
2221 }
2222 }
Abhishek Singh217d9782017-04-28 23:49:11 +05302223 if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
2224 &temp_peer->mac_addr,
Alok Kumare1977442018-11-28 17:16:03 +05302225 &vdev->last_peer_mac_addr) &&
2226 (check_valid == 0 ||
2227 temp_peer->valid)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302228 ol_txrx_info_high(
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002229 "vdev_id %d ("QDF_MAC_ADDR_STR") old peer exists.\n",
Abhishek Singh217d9782017-04-28 23:49:11 +05302230 vdev->vdev_id,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002231 QDF_MAC_ADDR_ARRAY(vdev->last_peer_mac_addr.raw));
Abhishek Singh217d9782017-04-28 23:49:11 +05302232 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2233 vdev->wait_on_peer_id = temp_peer->local_id;
2234 qdf_event_reset(&vdev->wait_delete_comp);
2235 wait_on_deletion = true;
2236 break;
2237 } else {
2238 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2239 ol_txrx_err("peer not found");
2240 return NULL;
2241 }
2242 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002243 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302244 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002245
Abhishek Singh217d9782017-04-28 23:49:11 +05302246 qdf_mem_zero(&vdev->last_peer_mac_addr,
2247 sizeof(union ol_txrx_align_mac_addr_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002248 if (wait_on_deletion) {
2249 /* wait for peer deletion */
Nachiket Kukade0396b732017-11-14 16:35:16 +05302250 rc = qdf_wait_for_event_completion(&vdev->wait_delete_comp,
Prakash Manjunathappad3ccca22016-05-05 19:23:19 -07002251 PEER_DELETION_TIMEOUT);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002252 if (QDF_STATUS_SUCCESS != rc) {
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002253 ol_txrx_err("error waiting for peer_id(%d) deletion, status %d\n",
Dustin Brown100201e2017-07-10 11:48:40 -07002254 vdev->wait_on_peer_id, (int) rc);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002255 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08002256 ol_txrx_dump_peer_access_list(temp_peer);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002257 wlan_roam_debug_dump_table();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002258 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Dustin Brown100201e2017-07-10 11:48:40 -07002259
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002260 return NULL;
2261 }
2262 }
2263
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302264 peer = qdf_mem_malloc(sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002265 if (!peer)
2266 return NULL; /* failure */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002267
2268 /* store provided params */
2269 peer->vdev = vdev;
2270	peer->ctrl_peer = ctrl_peer;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302271 qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
Srinivas Girigowdaa47b45f2019-02-27 12:29:02 -08002272 QDF_MAC_ADDR_SIZE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002273
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302274 ol_txrx_peer_txqs_init(pdev, peer);
2275
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002276 INIT_LIST_HEAD(&peer->bufq_info.cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302277 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002278 /* add this peer into the vdev's list */
2279 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302280 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002281 /* check whether this is a real peer (peer mac addr != vdev mac addr) */
Frank Liu4362e462018-01-16 11:51:55 +08002282 if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr)) {
2283 qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002284 vdev->last_real_peer = peer;
Frank Liu4362e462018-01-16 11:51:55 +08002285 qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
2286 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002287
2288 peer->rx_opt_proc = pdev->rx_opt_proc;
2289
2290 ol_rx_peer_init(pdev, peer);
2291
2292 /* initialize the peer_id */
2293 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2294 peer->peer_ids[i] = HTT_INVALID_PEER;
2295
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302296 qdf_spinlock_create(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002297 qdf_spinlock_create(&peer->bufq_info.bufq_lock);
2298
2299 peer->bufq_info.thresh = OL_TXRX_CACHED_BUFQ_THRESH;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002300
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302301 qdf_atomic_init(&peer->delete_in_progress);
2302 qdf_atomic_init(&peer->flush_in_progress);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302303 qdf_atomic_init(&peer->ref_cnt);
Mohit Khannab7bec722017-11-10 11:43:44 -08002304
2305 for (i = 0; i < PEER_DEBUG_ID_MAX; i++)
2306 qdf_atomic_init(&peer->access_list[i]);
2307
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002308 /* keep one reference for attach */
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002309 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002310
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002311 /* Set a flag to indicate peer create is pending in firmware */
Prakash Dhavali0d3f1d62016-11-20 23:48:24 -08002312 qdf_atomic_init(&peer->fw_create_pending);
2313 qdf_atomic_set(&peer->fw_create_pending, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002314
2315 peer->valid = 1;
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07002316 qdf_timer_init(pdev->osdev, &peer->peer_unmap_timer,
2317 peer_unmap_timer_handler, peer, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002318
2319 ol_txrx_peer_find_hash_add(pdev, peer);
2320
Mohit Khanna47384bc2016-08-15 15:37:05 -07002321 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002322 "vdev %pK created peer %pK ref_cnt %d ("QDF_MAC_ADDR_STR")\n",
Mohit Khanna47384bc2016-08-15 15:37:05 -07002323 vdev, peer, qdf_atomic_read(&peer->ref_cnt),
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07002324 QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002325 /*
 2326	 * For every peer MAP message, check and set bss_peer
2327 */
Ankit Guptaa5076012016-09-14 11:32:19 -07002328 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
Srinivas Girigowdaa47b45f2019-02-27 12:29:02 -08002329 QDF_MAC_ADDR_SIZE))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002330 peer->bss_peer = 1;
2331
2332 /*
2333 * The peer starts in the "disc" state while association is in progress.
2334 * Once association completes, the peer will get updated to "auth" state
2335 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2336 * or else to the "conn" state. For non-open mode, the peer will
2337 * progress to "auth" state once the authentication completes.
2338 */
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002339 peer->state = OL_TXRX_PEER_STATE_INVALID;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002340 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002341 OL_TXRX_PEER_STATE_DISC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002342
2343#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2344 peer->rssi_dbm = HTT_RSSI_INVALID;
2345#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002346 if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2347 !pdev->self_peer) {
2348 pdev->self_peer = peer;
2349 /*
2350 * No Tx in monitor mode, otherwise results in target assert.
2351 * Setting disable_intrabss_fwd to true
2352 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002353 ol_vdev_rx_set_intrabss_fwd((struct cdp_vdev *)vdev, true);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002354 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002355
2356 ol_txrx_local_peer_id_alloc(pdev, peer);
2357
Leo Chang98726762016-10-28 11:07:18 -07002358 return (void *)peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002359}
2360
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302361#undef PEER_DEL_TIMEOUT
2362
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002363/*
2364 * Discarding tx filter - removes all data frames (disconnected state)
2365 */
2366static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2367{
2368 return A_ERROR;
2369}
2370
2371/*
 2372 * Non-authentication tx filter - filters out data frames that are not
2373 * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2374 * data frames (connected state)
2375 */
2376static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2377{
2378 return
2379 (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2380 tx_msdu_info->htt.info.ethertype ==
2381 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2382}
2383
2384/*
2385 * Pass-through tx filter - lets all data frames through (authenticated state)
2386 */
2387static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2388{
2389 return A_OK;
2390}
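
/*
 * The three filters above are installed as peer->tx_filter by
 * ol_txrx_peer_state_update(): peers in the "auth" state get the
 * pass-through filter, peers in the "conn" state (connected but not yet
 * authenticated) get the non-auth filter that only passes EAPOL/WAPI
 * frames, and peers in any other state fall back to the discard filter.
 */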
2391
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002392/**
2393 * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2394 * @peer: handle to peer
2395 *
 2396 * Returns the mac address for modules which do not know the peer type.
2397 *
2398 * Return: the mac_addr from peer
2399 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002400static uint8_t *
Leo Chang98726762016-10-28 11:07:18 -07002401ol_txrx_peer_get_peer_mac_addr(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002402{
Leo Chang98726762016-10-28 11:07:18 -07002403 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002404
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002405 if (!peer)
2406 return NULL;
2407
2408 return peer->mac_addr.raw;
2409}
2410
Abhishek Singhcfb44482017-03-10 12:42:37 +05302411#ifdef WLAN_FEATURE_11W
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002412/**
2413 * ol_txrx_get_pn_info() - Returns pn info from peer
2414 * @peer: handle to peer
2415 * @last_pn_valid: return last_rmf_pn_valid value from peer.
2416 * @last_pn: return last_rmf_pn value from peer.
2417 * @rmf_pn_replays: return rmf_pn_replays value from peer.
2418 *
2419 * Return: NONE
2420 */
2421void
Leo Chang98726762016-10-28 11:07:18 -07002422ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002423 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2424{
Leo Chang98726762016-10-28 11:07:18 -07002425 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002426 *last_pn_valid = &peer->last_rmf_pn_valid;
2427 *last_pn = &peer->last_rmf_pn;
2428 *rmf_pn_replays = &peer->rmf_pn_replays;
2429}
Abhishek Singhcfb44482017-03-10 12:42:37 +05302430#else
2431void
2432ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
2433 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2434{
2435}
2436#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002437
2438/**
2439 * ol_txrx_get_opmode() - Return operation mode of vdev
2440 * @vdev: vdev handle
2441 *
2442 * Return: operation mode.
2443 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002444static int ol_txrx_get_opmode(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002445{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002446 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002447
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002448 return vdev->opmode;
2449}
2450
2451/**
2452 * ol_txrx_get_peer_state() - Return peer state of peer
2453 * @peer: peer handle
2454 *
2455 * Return: return peer state
2456 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002457static int ol_txrx_get_peer_state(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002458{
Leo Chang98726762016-10-28 11:07:18 -07002459 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002460
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002461 return peer->state;
2462}
2463
2464/**
2465 * ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
2466 * @peer: peer handle
2467 *
2468 * Return: vdev handle from peer
2469 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002470static struct cdp_vdev *ol_txrx_get_vdev_for_peer(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002471{
Leo Chang98726762016-10-28 11:07:18 -07002472 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002473
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002474 return (struct cdp_vdev *)peer->vdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002475}
2476
2477/**
2478 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
2479 * @vdev: vdev handle
2480 *
2481 * Return: vdev mac address
2482 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002483static uint8_t *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002484ol_txrx_get_vdev_mac_addr(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002485{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002486 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002487
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002488 if (!vdev)
2489 return NULL;
2490
2491 return vdev->mac_addr.raw;
2492}
2493
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002494#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002495/**
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002496 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002497 * vdev
2498 * @vdev: vdev handle
2499 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002500 * Return: Handle to struct qdf_mac_addr
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002501 */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002502struct qdf_mac_addr *
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002503ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
2504{
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002505 return (struct qdf_mac_addr *)&(vdev->mac_addr);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002506}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002507#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002508
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002509#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002510/**
2511 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
2512 * @vdev: vdev handle
2513 *
2514 * Return: Handle to pdev
2515 */
2516ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
2517{
2518 return vdev->pdev;
2519}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002520#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002521
2522/**
2523 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
2524 * @vdev: vdev handle
2525 *
2526 * Return: Handle to control pdev
2527 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002528static struct cdp_cfg *
2529ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002530{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002531 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002532
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002533 return vdev->pdev->ctrl_pdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002534}
2535
2536/**
2537 * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
2538 * @vdev: vdev handle
2539 *
2540 * Return: Rx Fwd disabled status
2541 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002542static uint8_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002543ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002544{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002545 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002546 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
2547 vdev->pdev->ctrl_pdev;
2548 return cfg->rx_fwd_disabled;
2549}
2550
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002551#ifdef QCA_IBSS_SUPPORT
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002552/**
2553 * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
2554 * @vdev: vdev handle
 2555 * @peer_num_delta: delta to apply to the vdev's IBSS peer count
 2556 *
 2557 * Return: -1 for failure or the total peer count after adjustment.
2558 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002559static int16_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002560ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002561 int16_t peer_num_delta)
2562{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002563 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002564 int16_t new_peer_num;
2565
2566 new_peer_num = vdev->ibss_peer_num + peer_num_delta;
Naveen Rawatc45d1622016-07-05 12:20:09 -07002567 if (new_peer_num > MAX_PEERS || new_peer_num < 0)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002568 return OL_TXRX_INVALID_NUM_PEERS;
2569
2570 vdev->ibss_peer_num = new_peer_num;
2571
2572 return new_peer_num;
2573}
2574
2575/**
2576 * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
2577 * beat timer
2578 * @vdev: vdev handle
2579 * @timer_value_sec: new heart beat timer value
2580 *
2581 * Return: Old timer value set in vdev.
2582 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002583static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
2584 uint16_t timer_value_sec)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002585{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002586 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002587 uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
2588
2589 vdev->ibss_peer_heart_beat_timer = timer_value_sec;
2590
2591 return old_timer_value;
2592}
jiad391c5282018-11-26 16:21:04 +08002593#else /* !QCA_IBSS_SUPPORT */
2594static inline int16_t
2595ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
2596 int16_t peer_num_delta)
2597{
2598 return 0;
2599}
2600
2601static inline uint16_t
2602ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
2603 uint16_t timer_value_sec)
2604{
2605 return 0;
2606}
2607#endif /* QCA_IBSS_SUPPORT */
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002608
2609/**
2610 * ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
2611 * @vdev: vdev handle
2612 * @callback: callback function to remove the peer.
2613 * @callback_context: handle for callback function
 2614 * @remove_last_peer: whether the last (bss) peer should also be removed
2615 *
2616 * Return: NONE
2617 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002618static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002619ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002620 ol_txrx_vdev_peer_remove_cb callback,
2621 void *callback_context, bool remove_last_peer)
2622{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002623 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002624 ol_txrx_peer_handle peer, temp;
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07002625 int self_removed = 0;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002626 /* remove all remote peers for vdev */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002627 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002628
2629 temp = NULL;
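	/*
	 * Invoke the removal callback one peer behind (on 'temp') so that
	 * peer_ref_mutex can be dropped around the callback while the
	 * list walk below continues safely.
	 */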
2630 TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
2631 peer_list_elem) {
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05302632 if (qdf_atomic_read(&peer->delete_in_progress))
2633 continue;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002634 if (temp) {
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002635 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05302636 callback(callback_context, temp->mac_addr.raw,
Jiachao Wu641760e2018-01-21 12:11:31 +08002637 vdev->vdev_id, temp);
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002638 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002639 }
2640 /* self peer is deleted last */
2641 if (peer == TAILQ_FIRST(&vdev->peer_list)) {
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07002642 self_removed = 1;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002643 break;
Yun Parkeaea8632017-04-09 09:53:45 -07002644 }
2645 temp = peer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002646 }
2647
Mohit Khanna137b97d2016-04-21 16:11:33 -07002648 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
2649
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07002650 if (self_removed)
Nirav Shah7c8c1712018-09-10 16:01:31 +05302651 ol_txrx_info("self peer removed by caller");
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07002652
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002653 if (remove_last_peer) {
2654 /* remove IBSS bss peer last */
2655 peer = TAILQ_FIRST(&vdev->peer_list);
2656 callback(callback_context, (uint8_t *) &vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08002657 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002658 }
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002659}
2660
2661/**
2662 * ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
2663 * @vdev: vdev handle
2664 * @callback: callback function to remove the peer.
2665 * @callback_context: handle for callback function
2666 *
2667 * Return: NONE
2668 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002669static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002670ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002671 ol_txrx_vdev_peer_remove_cb callback,
2672 void *callback_context)
2673{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002674 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002675 ol_txrx_peer_handle peer = NULL;
Jiachao Wu641760e2018-01-21 12:11:31 +08002676 ol_txrx_peer_handle tmp_peer = NULL;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002677
Jiachao Wu641760e2018-01-21 12:11:31 +08002678 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, peer_list_elem, tmp_peer) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302679 ol_txrx_info_high(
Nirav Shah7c8c1712018-09-10 16:01:31 +05302680 "peer found for vdev id %d. deleting the peer",
2681 vdev->vdev_id);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002682 callback(callback_context, (uint8_t *)&vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08002683 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002684 }
2685}
2686
Nirav Shah575282c2018-07-08 22:48:00 +05302687#ifdef WLAN_FEATURE_DSRC
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002688/**
2689 * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
2690 * @vdev: vdev handle
2691 * @ocb_set_chan: OCB channel information to be set in vdev.
2692 *
2693 * Return: NONE
2694 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002695static void ol_txrx_set_ocb_chan_info(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002696 struct ol_txrx_ocb_set_chan ocb_set_chan)
2697{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002698 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002699
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002700 vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
2701 vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
2702}
2703
2704/**
2705 * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
2706 * @vdev: vdev handle
2707 *
2708 * Return: handle to struct ol_txrx_ocb_chan_info
2709 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002710static struct ol_txrx_ocb_chan_info *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002711ol_txrx_get_ocb_chan_info(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002712{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002713 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002714
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002715 return vdev->ocb_channel_info;
2716}
Nirav Shah575282c2018-07-08 22:48:00 +05302717#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002718
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002719/**
2720 * @brief specify the peer's authentication state
2721 * @details
2722 * Specify the peer's authentication state (none, connected, authenticated)
2723 * to allow the data SW to determine whether to filter out invalid data frames.
2724 * (In the "connected" state, where security is enabled, but authentication
2725 * has not completed, tx and rx data frames other than EAPOL or WAPI should
2726 * be discarded.)
2727 * This function is only relevant for systems in which the tx and rx filtering
2728 * are done in the host rather than in the target.
2729 *
2730 * @param data_peer - which peer has changed its state
2731 * @param state - the new state of the peer
2732 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002733 * Return: QDF Status
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002734 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002735QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002736 uint8_t *peer_mac,
2737 enum ol_txrx_peer_state state)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002738{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002739 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002740 struct ol_txrx_peer_t *peer;
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002741 int peer_ref_cnt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002742
Anurag Chouhanc5548422016-02-24 18:33:27 +05302743 if (qdf_unlikely(!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302744 ol_txrx_err("Pdev is NULL");
Anurag Chouhanc5548422016-02-24 18:33:27 +05302745 qdf_assert(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302746 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002747 }
2748
Mohit Khannab7bec722017-11-10 11:43:44 -08002749 peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
2750 PEER_DEBUG_ID_OL_INTERNAL);
Jeff Johnson6795c3a2019-03-18 13:43:04 -07002751 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302752 ol_txrx_err(
Nirav Shah7c8c1712018-09-10 16:01:31 +05302753 "peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302754 peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
2755 peer_mac[4], peer_mac[5]);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302756 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002757 }
2758
2759 /* TODO: Should we send WMI command of the connection state? */
2760 /* avoid multiple auth state change. */
2761 if (peer->state == state) {
2762#ifdef TXRX_PRINT_VERBOSE_ENABLE
Nirav Shah7c8c1712018-09-10 16:01:31 +05302763 ol_txrx_dbg("no state change, returns directly");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002764#endif
Mohit Khannab7bec722017-11-10 11:43:44 -08002765 peer_ref_cnt = ol_txrx_peer_release_ref
2766 (peer,
2767 PEER_DEBUG_ID_OL_INTERNAL);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302768 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002769 }
2770
Nirav Shah7c8c1712018-09-10 16:01:31 +05302771 ol_txrx_dbg("change from %d to %d",
2772 peer->state, state);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002773
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002774 peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002775 ? ol_tx_filter_pass_thru
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002776 : ((state == OL_TXRX_PEER_STATE_CONN)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002777 ? ol_tx_filter_non_auth
2778 : ol_tx_filter_discard);
2779
2780 if (peer->vdev->pdev->cfg.host_addba) {
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002781 if (state == OL_TXRX_PEER_STATE_AUTH) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002782 int tid;
2783 /*
2784 * Pause all regular (non-extended) TID tx queues until
2785 * data arrives and ADDBA negotiation has completed.
2786 */
Nirav Shah7c8c1712018-09-10 16:01:31 +05302787 ol_txrx_dbg("pause peer and unpause mgmt/non-qos");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002788 ol_txrx_peer_pause(peer); /* pause all tx queues */
2789 /* unpause mgmt and non-QoS tx queues */
2790 for (tid = OL_TX_NUM_QOS_TIDS;
2791 tid < OL_TX_NUM_TIDS; tid++)
2792 ol_txrx_peer_tid_unpause(peer, tid);
2793 }
2794 }
Mohit Khannab7bec722017-11-10 11:43:44 -08002795 peer_ref_cnt = ol_txrx_peer_release_ref(peer,
2796 PEER_DEBUG_ID_OL_INTERNAL);
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002797 /*
Mohit Khannab7bec722017-11-10 11:43:44 -08002798 * after ol_txrx_peer_release_ref, peer object cannot be accessed
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002799 * if the return code was 0
2800 */
Mohit Khannab04dfcd2017-02-13 18:54:35 -08002801 if (peer_ref_cnt > 0)
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002802 /*
 2803		 * Set the state after the pause to avoid the race condition
2804 * with ADDBA check in tx path
2805 */
2806 peer->state = state;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302807 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002808}
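
/*
 * Illustrative call sequence (a sketch, not part of this file): a
 * control-path caller holding a pdev handle and the peer's MAC address
 * would typically drive a secured peer through the states as association
 * progresses, e.g.
 *
 *	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_CONN);
 *	... key exchange (EAPOL/WAPI) completes ...
 *	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_AUTH);
 *
 * The ppdev/peer_mac names are placeholders for the caller's own handles.
 */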
2809
2810void
2811ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
2812{
2813 peer->keyinstalled = val;
2814}
2815
2816void
2817ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
2818 uint8_t *peer_mac,
2819 union ol_txrx_peer_update_param_t *param,
2820 enum ol_txrx_peer_update_select_t select)
2821{
2822 struct ol_txrx_peer_t *peer;
2823
Mohit Khannab7bec722017-11-10 11:43:44 -08002824 peer = ol_txrx_peer_find_hash_find_get_ref(vdev->pdev, peer_mac, 0, 1,
2825 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002826 if (!peer) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05302827 ol_txrx_dbg("peer is null");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002828 return;
2829 }
2830
2831 switch (select) {
2832 case ol_txrx_peer_update_qos_capable:
2833 {
2834 /* save qos_capable here txrx peer,
2835 * when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO comes then save.
2836 */
2837 peer->qos_capable = param->qos_capable;
2838 /*
2839 * The following function call assumes that the peer has a
2840 * single ID. This is currently true, and
2841 * is expected to remain true.
2842 */
2843 htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
2844 peer->peer_ids[0],
2845 peer->qos_capable);
2846 break;
2847 }
2848 case ol_txrx_peer_update_uapsdMask:
2849 {
2850 peer->uapsd_mask = param->uapsd_mask;
2851 htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
2852 peer->peer_ids[0],
2853 peer->uapsd_mask);
2854 break;
2855 }
2856 case ol_txrx_peer_update_peer_security:
2857 {
2858 enum ol_sec_type sec_type = param->sec_type;
2859 enum htt_sec_type peer_sec_type = htt_sec_type_none;
2860
2861 switch (sec_type) {
2862 case ol_sec_type_none:
2863 peer_sec_type = htt_sec_type_none;
2864 break;
2865 case ol_sec_type_wep128:
2866 peer_sec_type = htt_sec_type_wep128;
2867 break;
2868 case ol_sec_type_wep104:
2869 peer_sec_type = htt_sec_type_wep104;
2870 break;
2871 case ol_sec_type_wep40:
2872 peer_sec_type = htt_sec_type_wep40;
2873 break;
2874 case ol_sec_type_tkip:
2875 peer_sec_type = htt_sec_type_tkip;
2876 break;
2877 case ol_sec_type_tkip_nomic:
2878 peer_sec_type = htt_sec_type_tkip_nomic;
2879 break;
2880 case ol_sec_type_aes_ccmp:
2881 peer_sec_type = htt_sec_type_aes_ccmp;
2882 break;
2883 case ol_sec_type_wapi:
2884 peer_sec_type = htt_sec_type_wapi;
2885 break;
2886 default:
2887 peer_sec_type = htt_sec_type_none;
2888 break;
2889 }
2890
2891 peer->security[txrx_sec_ucast].sec_type =
2892 peer->security[txrx_sec_mcast].sec_type =
2893 peer_sec_type;
2894
2895 break;
2896 }
2897 default:
2898 {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05302899 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002900 "ERROR: unknown param %d in %s", select,
2901 __func__);
2902 break;
2903 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002904 } /* switch */
Mohit Khannab7bec722017-11-10 11:43:44 -08002905 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002906}
2907
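/**
 * ol_txrx_peer_uapsdmask_get() - return the U-APSD mask of a peer
 * @txrx_pdev: the data physical device object
 * @peer_id: ID of the peer to look up
 *
 * Return: the peer's uapsd_mask, or 0 if no peer is found for @peer_id
 */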
2908uint8_t
2909ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
2910{
2911
2912 struct ol_txrx_peer_t *peer;
Yun Parkeaea8632017-04-09 09:53:45 -07002913
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002914 peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
2915 if (peer)
2916 return peer->uapsd_mask;
2917 return 0;
2918}
2919
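/**
 * ol_txrx_peer_qoscapable_get() - return the QoS capability of a peer
 * @txrx_pdev: the data physical device object
 * @peer_id: ID of the peer to look up
 *
 * Return: the peer's qos_capable flag, or 0 if no peer is found for @peer_id
 */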
2920uint8_t
2921ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
2922{
2923
2924 struct ol_txrx_peer_t *peer_t =
2925 ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
Jeff Johnson6795c3a2019-03-18 13:43:04 -07002926 if (peer_t)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002927 return peer_t->qos_capable;
2928 return 0;
2929}
2930
Mohit Khannab7bec722017-11-10 11:43:44 -08002931/**
Mohit Khannab7bec722017-11-10 11:43:44 -08002932 * ol_txrx_peer_free_tids() - free tids for the peer
2933 * @peer: peer handle
2934 *
2935 * Return: None
2936 */
2937static inline void ol_txrx_peer_free_tids(ol_txrx_peer_handle peer)
2938{
2939 int i = 0;
2940 /*
2941 * 'array' is allocated in addba handler and is supposed to be
2942 * freed in delba handler. There is the case (for example, in
2943 * SSR) where delba handler is not called. Because array points
2944 * to address of 'base' by default and is reallocated in addba
2945 * handler later, only free the memory when the array does not
2946 * point to base.
2947 */
2948 for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
2949 if (peer->tids_rx_reorder[i].array !=
2950 &peer->tids_rx_reorder[i].base) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05302951 ol_txrx_dbg("delete reorder arr, tid:%d", i);
Mohit Khannab7bec722017-11-10 11:43:44 -08002952 qdf_mem_free(peer->tids_rx_reorder[i].array);
2953 ol_rx_reorder_init(&peer->tids_rx_reorder[i],
2954 (uint8_t)i);
2955 }
2956 }
2957}
2958
2959/**
2960 * ol_txrx_peer_release_ref() - release peer reference
2961 * @peer: peer handle
2962 *
2963 * Release peer reference and delete peer if refcount is 0
2964 *
wadesong9f2b1102017-12-20 22:58:35 +08002965 * Return: Resulting peer ref_cnt after this function is invoked
Mohit Khannab7bec722017-11-10 11:43:44 -08002966 */
2967int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
2968 enum peer_debug_id_type debug_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002969{
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002970 int rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002971 struct ol_txrx_vdev_t *vdev;
2972 struct ol_txrx_pdev_t *pdev;
Ajit Pal Singhbd3d3642019-02-25 14:25:21 +05302973 bool ref_silent = true;
Jingxiang Ge190679b2018-01-30 08:56:19 +08002974 int access_list = 0;
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002975 uint32_t err_code = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002976
2977 /* preconditions */
2978 TXRX_ASSERT2(peer);
2979
2980 vdev = peer->vdev;
Jeff Johnson6795c3a2019-03-18 13:43:04 -07002981 if (!vdev) {
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002982 ol_txrx_err("The vdev is not present anymore\n");
Amar Singhal7ef59092018-09-11 15:32:35 -07002983 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002984 }
2985
2986 pdev = vdev->pdev;
Jeff Johnson6795c3a2019-03-18 13:43:04 -07002987 if (!pdev) {
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002988 ol_txrx_err("The pdev is not present anymore\n");
2989 err_code = 0xbad2;
2990 goto ERR_STATE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002991 }
2992
Mohit Khannab7bec722017-11-10 11:43:44 -08002993 if (debug_id >= PEER_DEBUG_ID_MAX || debug_id < 0) {
2994 ol_txrx_err("incorrect debug_id %d ", debug_id);
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002995 err_code = 0xbad3;
2996 goto ERR_STATE;
Mohit Khannab7bec722017-11-10 11:43:44 -08002997 }
2998
Jingxiang Ge3badb982018-01-02 17:39:01 +08002999 if (debug_id == PEER_DEBUG_ID_OL_RX_THREAD)
3000 ref_silent = true;
3001
3002 if (!ref_silent)
3003 wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
3004 DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003005 peer, 0xdead,
Jingxiang Ge3badb982018-01-02 17:39:01 +08003006 qdf_atomic_read(&peer->ref_cnt));
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003007
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003008
3009 /*
3010 * Hold the lock all the way from checking if the peer ref count
3011 * is zero until the peer references are removed from the hash
3012 * table and vdev list (if the peer ref count is zero).
3013 * This protects against a new HL tx operation starting to use the
3014 * peer object just after this function concludes it's done being used.
3015 * Furthermore, the lock needs to be held while checking whether the
3016 * vdev's list of peers is empty, to make sure that list is not modified
3017 * concurrently with the empty check.
3018 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303019 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003020
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003021 /*
3022 * Check for the reference count before deleting the peer
3023 * as we noticed that sometimes we are re-entering this
3024 * function again which is leading to dead-lock.
3025 * (A double-free should never happen, so assert if it does.)
3026 */
3027 rc = qdf_atomic_read(&(peer->ref_cnt));
3028
3029 if (rc == 0) {
3030 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3031 ol_txrx_err("The Peer is not present anymore\n");
3032 qdf_assert(0);
3033 return -EACCES;
3034 }
3035 /*
3036 * now decrement rc; this will be the return code.
3037 * 0 : peer deleted
3038 * >0: peer ref removed, but still has other references
3039 * <0: sanity failed - no changes to the state of the peer
3040 */
3041 rc--;
3042
Mohit Khannab7bec722017-11-10 11:43:44 -08003043 if (!qdf_atomic_read(&peer->access_list[debug_id])) {
3044 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
jitiphil8ad8a6f2018-03-01 23:45:05 +05303045 ol_txrx_err("peer %pK ref was not taken by %d",
Mohit Khannab7bec722017-11-10 11:43:44 -08003046 peer, debug_id);
3047 ol_txrx_dump_peer_access_list(peer);
3048 QDF_BUG(0);
3049 return -EACCES;
3050 }
Mohit Khannab7bec722017-11-10 11:43:44 -08003051 qdf_atomic_dec(&peer->access_list[debug_id]);
3052
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07003053 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
Mohit Khannab7bec722017-11-10 11:43:44 -08003054 u16 peer_id;
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003055 wlan_roam_debug_log(vdev->vdev_id,
3056 DEBUG_DELETING_PEER_OBJ,
3057 DEBUG_INVALID_PEER_ID,
3058 &peer->mac_addr.raw, peer, 0,
3059 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003060 peer_id = peer->local_id;
3061 /* remove the reference to the peer from the hash table */
3062 ol_txrx_peer_find_hash_remove(pdev, peer);
3063
3064 /* remove the peer from its parent vdev's list */
3065 TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
3066
3067 /* cleanup the Rx reorder queues for this peer */
3068 ol_rx_peer_cleanup(vdev, peer);
3069
Jingxiang Ge3badb982018-01-02 17:39:01 +08003070 qdf_spinlock_destroy(&peer->peer_info_lock);
3071 qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
3072
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003073 /* peer is removed from peer_list */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303074 qdf_atomic_set(&peer->delete_in_progress, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003075
3076 /*
3077 * Set wait_delete_comp event if the current peer id matches
3078 * with registered peer id.
3079 */
3080 if (peer_id == vdev->wait_on_peer_id) {
Anurag Chouhance0dc992016-02-16 18:18:03 +05303081 qdf_event_set(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003082 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3083 }
3084
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07003085 qdf_timer_sync_cancel(&peer->peer_unmap_timer);
3086 qdf_timer_free(&peer->peer_unmap_timer);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003087
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003088 /* check whether the parent vdev has no peers left */
3089 if (TAILQ_EMPTY(&vdev->peer_list)) {
3090 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003091 * Check if the parent vdev was waiting for its peers
3092 * to be deleted, in order for it to be deleted too.
3093 */
3094 if (vdev->delete.pending) {
3095 ol_txrx_vdev_delete_cb vdev_delete_cb =
3096 vdev->delete.callback;
3097 void *vdev_delete_context =
3098 vdev->delete.context;
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303099 /*
3100 * Now that there are no references to the peer,
3101 * we can release the peer reference lock.
3102 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303103 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303104
gbian016a42e2017-03-01 18:49:11 +08003105 /*
Yun Parkeaea8632017-04-09 09:53:45 -07003106 * The ol_tx_desc_free might access the invalid
3107 * content of vdev referred by tx desc, since
3108 * this vdev might be detached in another thread
 3109			 * asynchronously.
3110 *
3111 * Go through tx desc pool to set corresponding
3112 * tx desc's vdev to NULL when detach this vdev,
3113 * and add vdev checking in the ol_tx_desc_free
3114 * to avoid crash.
3115 */
gbian016a42e2017-03-01 18:49:11 +08003116 ol_txrx_tx_desc_reset_vdev(vdev);
Poddar, Siddarth14521792017-03-14 21:19:42 +05303117 ol_txrx_dbg(
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07003118 "deleting vdev object %pK ("QDF_MAC_ADDR_STR") - its last peer is done",
Nirav Shah7c8c1712018-09-10 16:01:31 +05303119 vdev,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07003120 QDF_MAC_ADDR_ARRAY(vdev->mac_addr.raw));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003121 /* all peers are gone, go ahead and delete it */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303122 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003123 if (vdev_delete_cb)
3124 vdev_delete_cb(vdev_delete_context);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303125 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303126 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003127 }
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303128 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303129 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303130 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003131
jitiphil8ad8a6f2018-03-01 23:45:05 +05303132 ol_txrx_info_high("[%d][%d]: Deleting peer %pK ref_cnt -> %d %s",
Mohit Khannab7bec722017-11-10 11:43:44 -08003133 debug_id,
3134 qdf_atomic_read(&peer->access_list[debug_id]),
3135 peer, rc,
3136 qdf_atomic_read(&peer->fw_create_pending)
3137 == 1 ?
3138 "(No Maps received)" : "");
Mohit Khanna8ee37c62017-08-07 17:15:20 -07003139
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303140 ol_txrx_peer_tx_queue_free(pdev, peer);
3141
Deepak Dhamdhereb0d2dda2017-04-03 01:01:50 -07003142 /* Remove mappings from peer_id to peer object */
3143 ol_txrx_peer_clear_map_peer(pdev, peer);
3144
wadesong9f2b1102017-12-20 22:58:35 +08003145 /* Remove peer pointer from local peer ID map */
3146 ol_txrx_local_peer_id_free(pdev, peer);
3147
Mohit Khannab7bec722017-11-10 11:43:44 -08003148 ol_txrx_peer_free_tids(peer);
3149
3150 ol_txrx_dump_peer_access_list(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003151
Alok Kumar8df4c762018-12-04 18:06:29 +05303152 if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam() &&
3153 pdev->self_peer == peer)
3154 pdev->self_peer = NULL;
3155
Alok Kumar8e178242018-06-15 12:49:57 +05303156 qdf_mem_free(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003157 } else {
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003158 access_list = qdf_atomic_read(&peer->access_list[debug_id]);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303159 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Jingxiang Ge3badb982018-01-02 17:39:01 +08003160 if (!ref_silent)
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003161 ol_txrx_info_high("[%d][%d]: ref delete peer %pK ref_cnt -> %d",
3162 debug_id,
3163 access_list,
3164 peer, rc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003165 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003166 return rc;
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003167ERR_STATE:
3168 wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
3169 DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
3170 peer, err_code, qdf_atomic_read(&peer->ref_cnt));
3171 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003172}
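
/*
 * Illustrative get/release pairing (a sketch, mirroring callers elsewhere
 * in this file): a reference taken under a given debug id must be dropped
 * with the same id, e.g.
 *
 *	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
 *						    PEER_DEBUG_ID_OL_INTERNAL);
 *	if (peer) {
 *		... use peer ...
 *		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *	}
 *
 * The pdev/peer_mac variables are placeholders for the caller's context.
 */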
3173
Dhanashri Atre12a08392016-02-17 13:10:34 -08003174/**
Mohit Khanna0696eef2016-04-14 16:14:08 -07003175 * ol_txrx_clear_peer_internal() - ol internal function to clear peer
3176 * @peer: pointer to ol txrx peer structure
3177 *
3178 * Return: QDF Status
3179 */
3180static QDF_STATUS
3181ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
3182{
3183 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3184 /* Drop pending Rx frames in CDS */
3185 if (sched_ctx)
3186 cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3187
3188 /* Purge the cached rx frame queue */
3189 ol_txrx_flush_rx_frames(peer, 1);
3190
3191 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003192 peer->state = OL_TXRX_PEER_STATE_DISC;
3193 qdf_spin_unlock_bh(&peer->peer_info_lock);
3194
3195 return QDF_STATUS_SUCCESS;
3196}
3197
3198/**
3199 * ol_txrx_clear_peer() - clear peer
Rakshith Suresh Patkar0dd44df2019-07-26 12:18:09 +05303200 * @peer_addr: peer mac address
Mohit Khanna0696eef2016-04-14 16:14:08 -07003201 *
3202 * Return: QDF Status
3203 */
Rakshith Suresh Patkar0dd44df2019-07-26 12:18:09 +05303204static QDF_STATUS
3205ol_txrx_clear_peer(struct cdp_pdev *ppdev,
3206 struct qdf_mac_addr peer_addr)
Mohit Khanna0696eef2016-04-14 16:14:08 -07003207{
3208 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003209 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Zhu Jianmin99523042018-06-06 20:01:44 +08003210 QDF_STATUS status;
Rakshith Suresh Patkar0dd44df2019-07-26 12:18:09 +05303211 /* peer_id to be removed */
3212 uint8_t peer_id;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003213
3214 if (!pdev) {
Zhu Jianmin99523042018-06-06 20:01:44 +08003215 ol_txrx_err("Unable to find pdev!");
Mohit Khanna0696eef2016-04-14 16:14:08 -07003216 return QDF_STATUS_E_FAILURE;
3217 }
3218
Rakshith Suresh Patkar0dd44df2019-07-26 12:18:09 +05303219 peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr.bytes, &peer_id,
3220 PEER_DEBUG_ID_OL_INTERNAL);
Kabilan Kannanfa163982018-01-30 12:03:41 -08003221
3222 /* Return success, if the peer is already cleared by
3223 * data path via peer detach function.
3224 */
Mohit Khanna0696eef2016-04-14 16:14:08 -07003225 if (!peer)
Kabilan Kannanfa163982018-01-30 12:03:41 -08003226 return QDF_STATUS_SUCCESS;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003227
Zhu Jianmin99523042018-06-06 20:01:44 +08003228 ol_txrx_dbg("Clear peer rx frames: " QDF_MAC_ADDR_STR,
3229 QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw));
3230 ol_txrx_clear_peer_internal(peer);
3231 status = ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003232
Zhu Jianmin99523042018-06-06 20:01:44 +08003233 return status;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003234}
3235
3236/**
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003237 * peer_unmap_timer_handler() - handler for the peer unmap timeout
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003238 * @data: peer object pointer
3239 *
3240 * Return: none
3241 */
3242void peer_unmap_timer_handler(void *data)
3243{
3244 ol_txrx_peer_handle peer = (ol_txrx_peer_handle)data;
3245
Vulupala Shashank Reddy6d6f68d2019-08-20 17:45:22 +05303246 if (!peer)
3247 return;
3248
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003249 ol_txrx_err("all unmap events not received for peer %pK, ref_cnt %d",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003250 peer, qdf_atomic_read(&peer->ref_cnt));
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07003251 ol_txrx_err("peer %pK ("QDF_MAC_ADDR_STR")",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003252 peer,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07003253 QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw));
Yeshwanth Sriram Guntuka0bf1a722019-10-10 14:08:36 +05303254
3255 cds_trigger_recovery(QDF_PEER_UNMAP_TIMEDOUT);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003256}
3257
3258
3259/**
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003260 * ol_txrx_peer_detach() - Delete a peer's data object.
3261 * @peer - the object to detach
Naveen Rawatf4ada152017-09-05 14:56:12 -07003262 * @bitmap - bitmap indicating special handling of request.
Dhanashri Atre12a08392016-02-17 13:10:34 -08003263 *
3264 * When the host's control SW disassociates a peer, it calls
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003265 * this function to detach and delete the peer. The reference
Dhanashri Atre12a08392016-02-17 13:10:34 -08003266 * stored in the control peer object to the data peer
3267 * object (set up by a call to ol_peer_store()) is provided.
3268 *
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003269 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08003270 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003271static void ol_txrx_peer_detach(void *ppeer, uint32_t bitmap)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003272{
Leo Chang98726762016-10-28 11:07:18 -07003273 ol_txrx_peer_handle peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003274 struct ol_txrx_vdev_t *vdev = peer->vdev;
3275
3276 /* redirect peer's rx delivery function to point to a discard func */
3277 peer->rx_opt_proc = ol_rx_discard;
3278
3279 peer->valid = 0;
3280
Mohit Khanna0696eef2016-04-14 16:14:08 -07003281 /* flush all rx packets before clearing up the peer local_id */
3282 ol_txrx_clear_peer_internal(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003283
3284 /* debug print to dump rx reorder state */
3285 /* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
3286
Abhinav Kumar50d4dc72018-06-15 16:35:50 +05303287 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07003288 "%s:peer %pK ("QDF_MAC_ADDR_STR")",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003289 __func__, peer,
Srinivas Girigowdacb7b8b82019-04-10 14:27:47 -07003290 QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003291
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303292 qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003293 if (vdev->last_real_peer == peer)
3294 vdev->last_real_peer = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303295 qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003296 htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
3297
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003298 /*
3299 * set delete_in_progress to identify that wma
 3300	 * is waiting for unmap message for this peer
3301 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303302 qdf_atomic_set(&peer->delete_in_progress, 1);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003303
Lin Bai973e6922018-01-08 17:59:19 +08003304 if (!(bitmap & (1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER))) {
Naveen Rawatf4ada152017-09-05 14:56:12 -07003305 if (vdev->opmode == wlan_op_mode_sta) {
3306 qdf_mem_copy(&peer->vdev->last_peer_mac_addr,
3307 &peer->mac_addr,
3308 sizeof(union ol_txrx_align_mac_addr_t));
Abhishek Singh217d9782017-04-28 23:49:11 +05303309
Lin Bai973e6922018-01-08 17:59:19 +08003310 /*
3311 * Create a timer to track unmap events when the
3312 * sta peer gets deleted.
3313 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003314 qdf_timer_start(&peer->peer_unmap_timer,
3315 OL_TXRX_PEER_UNMAP_TIMEOUT);
Mohit Khannab7bec722017-11-10 11:43:44 -08003316 ol_txrx_info_high
3317 ("started peer_unmap_timer for peer %pK",
3318 peer);
Naveen Rawatf4ada152017-09-05 14:56:12 -07003319 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003320 }
3321
3322 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003323 * Remove the reference added during peer_attach.
3324 * The peer will still be left allocated until the
3325 * PEER_UNMAP message arrives to remove the other
3326 * reference, added by the PEER_MAP message.
3327 */
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003328 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003329}
3330
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003331/**
3332 * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
Lin Bai973e6922018-01-08 17:59:19 +08003333 * @ppeer - the object to detach
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003334 *
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003335 * Detach a peer and force peer object to be removed. It is called during
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003336 * roaming scenario when the firmware has already deleted a peer.
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003337 * Remove it from the peer_id_to_object map. Peer object is actually freed
3338 * when last reference is deleted.
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003339 *
3340 * Return: None
3341 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003342static void ol_txrx_peer_detach_force_delete(void *ppeer)
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003343{
Leo Chang98726762016-10-28 11:07:18 -07003344 ol_txrx_peer_handle peer = ppeer;
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003345 ol_txrx_pdev_handle pdev = peer->vdev->pdev;
3346
Nirav Shah7c8c1712018-09-10 16:01:31 +05303347 ol_txrx_info_high("peer %pK, peer->ref_cnt %d",
3348 peer, qdf_atomic_read(&peer->ref_cnt));
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003349
3350 /* Clear the peer_id_to_obj map entries */
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003351 ol_txrx_peer_remove_obj_map_entries(pdev, peer);
Lin Bai973e6922018-01-08 17:59:19 +08003352 ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003353}
3354
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003355/**
Alok Kumare1977442018-11-28 17:16:03 +05303356 * ol_txrx_peer_detach_sync() - peer detach sync callback
3357 * @ppeer - the peer object
3358 * @peer_unmap_sync - peer unmap sync cb.
3359 * @bitmap - bitmap indicating special handling of request.
3360 *
Alok Kumare1977442018-11-28 17:16:03 +05303361 * Return: None
3362 */
3363static void ol_txrx_peer_detach_sync(void *ppeer,
3364 ol_txrx_peer_unmap_sync_cb peer_unmap_sync,
3365 uint32_t bitmap)
3366{
3367 ol_txrx_peer_handle peer = ppeer;
Alok Kumar604b0332019-01-24 17:49:25 +05303368 ol_txrx_pdev_handle pdev = peer->vdev->pdev;
Alok Kumare1977442018-11-28 17:16:03 +05303369
3370 ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d", __func__,
3371 peer, qdf_atomic_read(&peer->ref_cnt));
3372
Alok Kumar604b0332019-01-24 17:49:25 +05303373 if (!pdev->peer_unmap_sync_cb)
3374 pdev->peer_unmap_sync_cb = peer_unmap_sync;
3375
Alok Kumare1977442018-11-28 17:16:03 +05303376 ol_txrx_peer_detach(peer, bitmap);
3377}
3378
3379/**
Alok Kumar688eadb2019-02-14 14:44:01 +05303380 * ol_txrx_peer_unmap_sync_cb_set() - set peer unmap sync callback
3381 * @ppdev - TXRX pdev context
3382 * @peer_unmap_sync - peer unmap sync callback
3383 *
3384 * Return: None
3385 */
3386static void ol_txrx_peer_unmap_sync_cb_set(
3387 struct cdp_pdev *ppdev,
3388 ol_txrx_peer_unmap_sync_cb peer_unmap_sync)
3389{
3390 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
3391
3392 if (!pdev->peer_unmap_sync_cb)
3393 pdev->peer_unmap_sync_cb = peer_unmap_sync;
3394}
3395
3396/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003397 * ol_txrx_dump_tx_desc() - dump tx desc total and free count
3398 * @txrx_pdev: Pointer to txrx pdev
3399 *
3400 * Return: none
3401 */
3402static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3403{
3404 struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003405 uint32_t total, num_free;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003406
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303407 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3408 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3409 else
3410 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003411
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003412 num_free = ol_tx_get_total_free_desc(pdev);
3413
Kapil Gupta53d9b572017-06-28 17:53:25 +05303414 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303415 "total tx credit %d num_free %d",
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003416 total, num_free);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003417
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003418}
3419
3420/**
3421 * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3422 * @timeout: timeout in ms
3423 *
3424 * Wait for tx queue to be empty, return timeout error if
3425 * queue doesn't empty before timeout occurs.
3426 *
3427 * Return:
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303428 * QDF_STATUS_SUCCESS if the queue empties,
3429 * QDF_STATUS_E_TIMEOUT in case of timeout,
3430 * QDF_STATUS_E_FAULT in case of missing handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003431 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003432static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003433{
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05303434 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
3435 struct ol_txrx_pdev_t *txrx_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003436
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05303437 if (qdf_unlikely(!soc)) {
3438 ol_txrx_err("soc is NULL");
3439 return QDF_STATUS_E_FAULT;
3440 }
3441
3442 txrx_pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
Jeff Johnson6795c3a2019-03-18 13:43:04 -07003443 if (!txrx_pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303444 ol_txrx_err("txrx context is null");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303445 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003446 }
3447
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003448 while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
Anurag Chouhan512c7d52016-02-19 15:49:46 +05303449 qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003450 if (timeout <= 0) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303451 ol_txrx_err("tx frames are pending");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003452 ol_txrx_dump_tx_desc(txrx_pdev);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303453 return QDF_STATUS_E_TIMEOUT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003454 }
3455 timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3456 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303457 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003458}
3459
3460#ifndef QCA_WIFI_3_0_EMU
Himanshu Agarwal83a87572017-05-25 14:09:50 +05303461#define SUSPEND_DRAIN_WAIT 500
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003462#else
3463#define SUSPEND_DRAIN_WAIT 3000
3464#endif
3465
Yue Ma1e11d792016-02-26 18:58:44 -08003466#ifdef FEATURE_RUNTIME_PM
3467/**
3468 * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
3469 * @txrx_pdev: TXRX pdev context
3470 *
3471 * TXRX is ready to runtime suspend if there are no pending packets
3472 * in the tx queue.
3473 *
3474 * Return: QDF_STATUS
3475 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003476static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003477{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003478 struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07003479
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003480 if (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev))
Yue Ma1e11d792016-02-26 18:58:44 -08003481 return QDF_STATUS_E_BUSY;
3482 else
3483 return QDF_STATUS_SUCCESS;
3484}
3485
3486/**
3487 * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
3488 * @txrx_pdev: TXRX pdev context
3489 *
3490 * This is a dummy function for symmetry.
3491 *
3492 * Return: QDF_STATUS_SUCCESS
3493 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003494static QDF_STATUS ol_txrx_runtime_resume(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003495{
3496 return QDF_STATUS_SUCCESS;
3497}
3498#endif
3499
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003500/**
3501 * ol_txrx_bus_suspend() - bus suspend
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003502 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003503 *
3504 * Ensure that ol_txrx is ready for bus suspend
3505 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303506 * Return: QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003507 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003508static QDF_STATUS ol_txrx_bus_suspend(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003509{
3510 return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3511}
3512
3513/**
3514 * ol_txrx_bus_resume() - bus resume
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003515 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003516 *
3517 * Dummy function for symetry
3518 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303519 * Return: QDF_STATUS_SUCCESS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003520 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003521static QDF_STATUS ol_txrx_bus_resume(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003522{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303523 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003524}
3525
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003526/**
3527 * ol_txrx_get_tx_pending() - get the number of pending transmit
3528 * frames that are awaiting completion
3529 * @ppdev: the data physical device object
3530 *
3531 * Mainly used in the cleanup path to make sure all buffers have been freed.
3532 *
3533 * Return: count of pending frames
3534 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003535int ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003536{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003537 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003538 uint32_t total;
3539
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303540 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3541 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3542 else
3543 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003544
Nirav Shah55b45a02016-01-21 10:00:16 +05303545 return total - ol_tx_get_total_free_desc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003546}
3547
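/**
 * ol_txrx_discard_tx_pending() - discard all pending tx frames
 * @pdev_handle: txrx pdev handle
 *
 * Discard the tx frames pending in HIF/HTT, flush the tx queues, and
 * free the discarded descriptors with an error completion.
 *
 * Return: None
 */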
3548void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
3549{
3550 ol_tx_desc_list tx_descs;
Yun Parkeaea8632017-04-09 09:53:45 -07003551 /*
3552 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303553 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Yun Parkeaea8632017-04-09 09:53:45 -07003554 * which is the same as the normal data send completion path
3555 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003556 htt_tx_pending_discard(pdev_handle->htt_pdev);
3557
3558 TAILQ_INIT(&tx_descs);
3559 ol_tx_queue_discard(pdev_handle, true, &tx_descs);
3560 /* Discard Frames in Discard List */
3561 ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
3562
3563 ol_tx_discard_target_frms(pdev_handle);
3564}
3565
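/*
 * Helpers to pass a stats request pointer to the target as an opaque
 * 64-bit cookie and to recover the pointer from that cookie later.
 */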
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003566static inline
3567uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
3568{
3569 return (uint64_t) ((size_t) req);
3570}
3571
3572static inline
3573struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
3574{
3575 return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
3576}
3577
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003578#ifdef currently_unused
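/**
 * ol_txrx_fw_stats_cfg() - configure a firmware stats type via HTT
 * @vdev: virtual device handle
 * @cfg_stats_type: stats type to configure
 * @cfg_val: configuration value for the stats type
 *
 * Return: None
 */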
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003579void
3580ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
3581 uint8_t cfg_stats_type, uint32_t cfg_val)
3582{
jitiphil335d2412018-06-07 22:49:24 +05303583 uint8_t dummy_cookie = 0;
Yun Parkeaea8632017-04-09 09:53:45 -07003584
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003585 htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
3586 0 /* reset mask */,
3587 cfg_stats_type, cfg_val, dummy_cookie);
3588}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003589#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003590
jitiphil335d2412018-06-07 22:49:24 +05303591/**
3592 * ol_txrx_fw_stats_desc_pool_init() - Initialize the fw stats descriptor pool
3593 * @pdev: handle to ol txrx pdev
3594 * @pool_size: Size of fw stats descriptor pool
3595 *
3596 * Return: 0 for success, error code on failure.
3597 */
3598int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
3599 uint8_t pool_size)
3600{
3601 int i;
3602
3603 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303604 ol_txrx_err("pdev is NULL");
jitiphil335d2412018-06-07 22:49:24 +05303605 return -EINVAL;
3606 }
3607 pdev->ol_txrx_fw_stats_desc_pool.pool = qdf_mem_malloc(pool_size *
3608 sizeof(struct ol_txrx_fw_stats_desc_elem_t));
Nirav Shah7c8c1712018-09-10 16:01:31 +05303609 if (!pdev->ol_txrx_fw_stats_desc_pool.pool)
jitiphil335d2412018-06-07 22:49:24 +05303610 return -ENOMEM;
Nirav Shah7c8c1712018-09-10 16:01:31 +05303611
jitiphil335d2412018-06-07 22:49:24 +05303612 pdev->ol_txrx_fw_stats_desc_pool.freelist =
3613 &pdev->ol_txrx_fw_stats_desc_pool.pool[0];
3614 pdev->ol_txrx_fw_stats_desc_pool.pool_size = pool_size;
3615
3616 for (i = 0; i < (pool_size - 1); i++) {
3617 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
3618 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
3619 pdev->ol_txrx_fw_stats_desc_pool.pool[i].next =
3620 &pdev->ol_txrx_fw_stats_desc_pool.pool[i + 1];
3621 }
3622 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
3623 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
3624 pdev->ol_txrx_fw_stats_desc_pool.pool[i].next = NULL;
3625 qdf_spinlock_create(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3626 qdf_atomic_init(&pdev->ol_txrx_fw_stats_desc_pool.initialized);
3627 qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 1);
3628 return 0;
3629}
3630
3631/**
3632 * ol_txrx_fw_stats_desc_pool_deinit() - Deinitialize the
3633 * fw stats descriptor pool
3634 * @pdev: handle to ol txrx pdev
3635 *
3636 * Return: None
3637 */
3638void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev)
3639{
jitiphil335d2412018-06-07 22:49:24 +05303640 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303641 ol_txrx_err("pdev is NULL");
jitiphil335d2412018-06-07 22:49:24 +05303642 return;
3643 }
3644 if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303645 ol_txrx_err("Pool is not initialized");
jitiphil335d2412018-06-07 22:49:24 +05303646 return;
3647 }
3648 if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303649 ol_txrx_err("Pool is not allocated");
jitiphil335d2412018-06-07 22:49:24 +05303650 return;
3651 }
3652 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3653 qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 0);
jitiphil335d2412018-06-07 22:49:24 +05303654 qdf_mem_free(pdev->ol_txrx_fw_stats_desc_pool.pool);
3655 pdev->ol_txrx_fw_stats_desc_pool.pool = NULL;
3656
3657 pdev->ol_txrx_fw_stats_desc_pool.freelist = NULL;
3658 pdev->ol_txrx_fw_stats_desc_pool.pool_size = 0;
3659 qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3660}
3661
3662/**
3663 * ol_txrx_fw_stats_desc_alloc() - Get fw stats descriptor from fw stats
3664 * free descriptor pool
3665 * @pdev: handle to ol txrx pdev
3666 *
3667 * Return: pointer to fw stats descriptor, NULL on failure
3668 */
3669struct ol_txrx_fw_stats_desc_t
3670 *ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev)
3671{
3672 struct ol_txrx_fw_stats_desc_t *desc = NULL;
3673
3674 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3675 if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
3676 qdf_spin_unlock_bh(&pdev->
3677 ol_txrx_fw_stats_desc_pool.pool_lock);
Nirav Shah7c8c1712018-09-10 16:01:31 +05303678 ol_txrx_err("Pool deinitialized");
jitiphil335d2412018-06-07 22:49:24 +05303679 return NULL;
3680 }
3681 if (pdev->ol_txrx_fw_stats_desc_pool.freelist) {
3682 desc = &pdev->ol_txrx_fw_stats_desc_pool.freelist->desc;
3683 pdev->ol_txrx_fw_stats_desc_pool.freelist =
3684 pdev->ol_txrx_fw_stats_desc_pool.freelist->next;
3685 }
3686 qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3687
3688 if (desc)
Nirav Shah7c8c1712018-09-10 16:01:31 +05303689 ol_txrx_dbg("desc_id %d allocated", desc->desc_id);
jitiphil335d2412018-06-07 22:49:24 +05303690 else
Nirav Shah7c8c1712018-09-10 16:01:31 +05303691 ol_txrx_err("fw stats descriptors are exhausted");
jitiphil335d2412018-06-07 22:49:24 +05303692
3693 return desc;
3694}
3695
3696/**
3697 * ol_txrx_fw_stats_desc_get_req() - get the stats request for a descriptor
3698 * and return the descriptor to the free pool
3699 * @pdev: handle to ol txrx pdev
3700 * @desc_id: ID of the fw stats descriptor to look up
3701 *
3702 * Return: pointer to the stats request, or NULL if the pool is deinitialized
3703 */
3704struct ol_txrx_stats_req_internal
3705 *ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
3706 unsigned char desc_id)
3707{
3708 struct ol_txrx_fw_stats_desc_elem_t *desc_elem;
3709 struct ol_txrx_stats_req_internal *req;
3710
3711 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3712 if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
3713 qdf_spin_unlock_bh(&pdev->
3714 ol_txrx_fw_stats_desc_pool.pool_lock);
Nirav Shah7c8c1712018-09-10 16:01:31 +05303715 ol_txrx_err("Desc ID %u Pool deinitialized", desc_id);
jitiphil335d2412018-06-07 22:49:24 +05303716 return NULL;
3717 }
3718 desc_elem = &pdev->ol_txrx_fw_stats_desc_pool.pool[desc_id];
3719 req = desc_elem->desc.req;
3720 desc_elem->desc.req = NULL;
3721 desc_elem->next =
3722 pdev->ol_txrx_fw_stats_desc_pool.freelist;
3723 pdev->ol_txrx_fw_stats_desc_pool.freelist = desc_elem;
3724 qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3725 return req;
3726}
3727
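/**
 * ol_txrx_fw_stats_get() - request firmware stats via HTT
 * @pvdev: virtual device handle
 * @req: stats request specification from the caller
 * @per_vdev: whether the request is per vdev
 * @response_expected: whether a firmware response is expected
 *
 * Copies the caller's request into a non-transient object, optionally
 * allocates a fw stats descriptor whose ID is used as the HTT cookie,
 * and sends the stats request to the target.
 *
 * Return: A_OK on success, A_ERROR or A_NO_MEMORY on failure
 */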
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003728static A_STATUS
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003729ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
Dhanashri Atre52f71332016-08-22 12:12:36 -07003730 bool per_vdev, bool response_expected)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003731{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003732 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003733 struct ol_txrx_pdev_t *pdev = vdev->pdev;
jitiphil335d2412018-06-07 22:49:24 +05303734 uint8_t cookie = FW_STATS_DESC_POOL_SIZE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003735 struct ol_txrx_stats_req_internal *non_volatile_req;
jitiphil335d2412018-06-07 22:49:24 +05303736 struct ol_txrx_fw_stats_desc_t *desc = NULL;
3737 struct ol_txrx_fw_stats_desc_elem_t *elem = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003738
3739 if (!pdev ||
3740 req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
3741 req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
3742 return A_ERROR;
3743 }
3744
3745 /*
3746 * Allocate a non-transient stats request object.
3747 * (The one provided as an argument is likely allocated on the stack.)
3748 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303749 non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003750 if (!non_volatile_req)
3751 return A_NO_MEMORY;
3752
3753 /* copy the caller's specifications */
3754 non_volatile_req->base = *req;
3755 non_volatile_req->serviced = 0;
3756 non_volatile_req->offset = 0;
tfyu9fcabd72017-09-26 17:46:48 +08003757 if (response_expected) {
jitiphil335d2412018-06-07 22:49:24 +05303758 desc = ol_txrx_fw_stats_desc_alloc(pdev);
3759 if (!desc) {
3760 qdf_mem_free(non_volatile_req);
3761 return A_ERROR;
3762 }
3763
3764 /* use the desc id as the cookie */
3765 cookie = desc->desc_id;
3766 desc->req = non_volatile_req;
tfyu9fcabd72017-09-26 17:46:48 +08003767 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3768 TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
3769 pdev->req_list_depth++;
3770 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
3771 }
3772
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003773 if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
3774 req->stats_type_upload_mask,
3775 req->stats_type_reset_mask,
3776 HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
3777 cookie)) {
tfyu9fcabd72017-09-26 17:46:48 +08003778 if (response_expected) {
3779 qdf_spin_lock_bh(&pdev->req_list_spinlock);
jitiphil335d2412018-06-07 22:49:24 +05303780 TAILQ_REMOVE(&pdev->req_list, non_volatile_req,
3781 req_list_elem);
tfyu9fcabd72017-09-26 17:46:48 +08003782 pdev->req_list_depth--;
3783 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
jitiphil335d2412018-06-07 22:49:24 +05303784 if (desc) {
3785 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.
3786 pool_lock);
3787 desc->req = NULL;
3788 elem = container_of(desc,
3789 struct ol_txrx_fw_stats_desc_elem_t,
3790 desc);
3791 elem->next =
3792 pdev->ol_txrx_fw_stats_desc_pool.freelist;
3793 pdev->ol_txrx_fw_stats_desc_pool.freelist = elem;
3794 qdf_spin_unlock_bh(&pdev->
3795 ol_txrx_fw_stats_desc_pool.
3796 pool_lock);
3797 }
tfyu9fcabd72017-09-26 17:46:48 +08003798 }
3799
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303800 qdf_mem_free(non_volatile_req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003801 return A_ERROR;
3802 }
3803
Nirav Shahd2310422016-01-21 18:58:06 +05303804 if (response_expected == false)
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303805 qdf_mem_free(non_volatile_req);
Nirav Shahd2310422016-01-21 18:58:06 +05303806
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003807 return A_OK;
3808}
Dhanashri Atre12a08392016-02-17 13:10:34 -08003809
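/**
 * ol_txrx_fw_stats_handler() - process a firmware stats response
 * @pdev: txrx pdev handle
 * @cookie: fw stats descriptor ID carried in the HTT message
 * @stats_info_list: series of stats records from the firmware
 *
 * Looks up the pending stats request for the cookie, copies each stats
 * record into the requester's buffer (if provided), invokes the caller's
 * callback, and frees the request once the final record is received.
 *
 * Return: None
 */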
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003810void
3811ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
jitiphil335d2412018-06-07 22:49:24 +05303812 uint8_t cookie, uint8_t *stats_info_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003813{
3814 enum htt_dbg_stats_type type;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07003815 enum htt_cmn_dbg_stats_type cmn_type = HTT_DBG_CMN_NUM_STATS_INVALID;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003816 enum htt_dbg_stats_status status;
3817 int length;
3818 uint8_t *stats_data;
tfyu9fcabd72017-09-26 17:46:48 +08003819 struct ol_txrx_stats_req_internal *req, *tmp;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003820 int more = 0;
tfyu9fcabd72017-09-26 17:46:48 +08003821 int found = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003822
jitiphil335d2412018-06-07 22:49:24 +05303823 if (cookie >= FW_STATS_DESC_POOL_SIZE) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303824 ol_txrx_err("Cookie is not valid");
jitiphil335d2412018-06-07 22:49:24 +05303825 return;
3826 }
3827 req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie);
3828 if (!req) {
3829 ol_txrx_err("%s: Request not retrieved for cookie %u", __func__,
3830 (uint8_t)cookie);
3831 return;
3832 }
tfyu9fcabd72017-09-26 17:46:48 +08003833 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3834 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
3835 if (req == tmp) {
3836 found = 1;
3837 break;
3838 }
3839 }
3840 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
3841
3842 if (!found) {
3843 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05303844 "req(%pK) from firmware can't be found in the list\n", req);
tfyu9fcabd72017-09-26 17:46:48 +08003845 return;
3846 }
3847
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003848 do {
3849 htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
3850 &length, &stats_data);
3851 if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
3852 break;
3853 if (status == HTT_DBG_STATS_STATUS_PRESENT ||
3854 status == HTT_DBG_STATS_STATUS_PARTIAL) {
3855 uint8_t *buf;
3856 int bytes = 0;
3857
3858 if (status == HTT_DBG_STATS_STATUS_PARTIAL)
3859 more = 1;
3860 if (req->base.print.verbose || req->base.print.concise)
3861 /* provide the header along with the data */
3862 htt_t2h_stats_print(stats_info_list,
3863 req->base.print.concise);
3864
3865 switch (type) {
3866 case HTT_DBG_STATS_WAL_PDEV_TXRX:
3867 bytes = sizeof(struct wlan_dbg_stats);
3868 if (req->base.copy.buf) {
3869 int lmt;
3870
3871 lmt = sizeof(struct wlan_dbg_stats);
3872 if (req->base.copy.byte_limit < lmt)
3873 lmt = req->base.copy.byte_limit;
3874 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303875 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003876 }
3877 break;
3878 case HTT_DBG_STATS_RX_REORDER:
3879 bytes = sizeof(struct rx_reorder_stats);
3880 if (req->base.copy.buf) {
3881 int lmt;
3882
3883 lmt = sizeof(struct rx_reorder_stats);
3884 if (req->base.copy.byte_limit < lmt)
3885 lmt = req->base.copy.byte_limit;
3886 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303887 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003888 }
3889 break;
3890 case HTT_DBG_STATS_RX_RATE_INFO:
3891 bytes = sizeof(wlan_dbg_rx_rate_info_t);
3892 if (req->base.copy.buf) {
3893 int lmt;
3894
3895 lmt = sizeof(wlan_dbg_rx_rate_info_t);
3896 if (req->base.copy.byte_limit < lmt)
3897 lmt = req->base.copy.byte_limit;
3898 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303899 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003900 }
3901 break;
3902
3903 case HTT_DBG_STATS_TX_RATE_INFO:
3904 bytes = sizeof(wlan_dbg_tx_rate_info_t);
3905 if (req->base.copy.buf) {
3906 int lmt;
3907
3908 lmt = sizeof(wlan_dbg_tx_rate_info_t);
3909 if (req->base.copy.byte_limit < lmt)
3910 lmt = req->base.copy.byte_limit;
3911 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303912 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003913 }
3914 break;
3915
3916 case HTT_DBG_STATS_TX_PPDU_LOG:
3917 bytes = 0;
3918 /* TO DO: specify how many bytes are present */
3919 /* TO DO: add copying to the requestor's buf */
3920
3921 case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07003922 bytes = sizeof(struct
3923 rx_remote_buffer_mgmt_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003924 if (req->base.copy.buf) {
3925 int limit;
3926
Yun Parkeaea8632017-04-09 09:53:45 -07003927 limit = sizeof(struct
3928 rx_remote_buffer_mgmt_stats);
3929 if (req->base.copy.byte_limit < limit)
3930 limit = req->base.copy.
3931 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003932 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303933 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003934 }
3935 break;
3936
3937 case HTT_DBG_STATS_TXBF_INFO:
3938 bytes = sizeof(struct wlan_dbg_txbf_data_stats);
3939 if (req->base.copy.buf) {
3940 int limit;
3941
Yun Parkeaea8632017-04-09 09:53:45 -07003942 limit = sizeof(struct
3943 wlan_dbg_txbf_data_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003944 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003945 limit = req->base.copy.
3946 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003947 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303948 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003949 }
3950 break;
3951
3952 case HTT_DBG_STATS_SND_INFO:
3953 bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
3954 if (req->base.copy.buf) {
3955 int limit;
3956
Yun Parkeaea8632017-04-09 09:53:45 -07003957 limit = sizeof(struct
3958 wlan_dbg_txbf_snd_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003959 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003960 limit = req->base.copy.
3961 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003962 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303963 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003964 }
3965 break;
3966
3967 case HTT_DBG_STATS_TX_SELFGEN_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07003968 bytes = sizeof(struct
3969 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003970 if (req->base.copy.buf) {
3971 int limit;
3972
Yun Parkeaea8632017-04-09 09:53:45 -07003973 limit = sizeof(struct
3974 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003975 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003976 limit = req->base.copy.
3977 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003978 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303979 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003980 }
3981 break;
3982
3983 case HTT_DBG_STATS_ERROR_INFO:
3984 bytes =
3985 sizeof(struct wlan_dbg_wifi2_error_stats);
3986 if (req->base.copy.buf) {
3987 int limit;
3988
Yun Parkeaea8632017-04-09 09:53:45 -07003989 limit = sizeof(struct
3990 wlan_dbg_wifi2_error_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003991 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003992 limit = req->base.copy.
3993 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003994 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303995 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003996 }
3997 break;
3998
3999 case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
4000 bytes =
4001 sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
4002 if (req->base.copy.buf) {
4003 int limit;
4004
4005 limit = sizeof(struct
4006 rx_txbf_musu_ndpa_pkts_stats);
4007 if (req->base.copy.byte_limit < limit)
4008 limit =
4009 req->base.copy.byte_limit;
4010 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304011 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004012 }
4013 break;
4014
4015 default:
4016 break;
4017 }
Yun Parkeaea8632017-04-09 09:53:45 -07004018 buf = req->base.copy.buf ?
4019 req->base.copy.buf : stats_data;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07004020
4021 /* Not implemented for MCL */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004022 if (req->base.callback.fp)
4023 req->base.callback.fp(req->base.callback.ctxt,
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07004024 cmn_type, buf, bytes);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004025 }
4026 stats_info_list += length;
4027 } while (1);
4028
4029 if (!more) {
tfyu9fcabd72017-09-26 17:46:48 +08004030 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4031 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4032 if (req == tmp) {
4033 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
4034 pdev->req_list_depth--;
4035 qdf_mem_free(req);
4036 break;
4037 }
4038 }
4039 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004040 }
4041}
4042
4043#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
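/**
 * ol_txrx_debug() - run the debug display/analysis routines selected by mask
 * @vdev: virtual device handle
 * @debug_specs: bitmask of TXRX_DBG_MASK_* selections
 *
 * Return: 0
 */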
4044int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
4045{
4046 if (debug_specs & TXRX_DBG_MASK_OBJS) {
4047#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4048 ol_txrx_pdev_display(vdev->pdev, 0);
4049#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304050 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304051 "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004052#endif
4053 }
Yun Parkeaea8632017-04-09 09:53:45 -07004054 if (debug_specs & TXRX_DBG_MASK_STATS)
Mohit Khannaca4173b2017-09-12 21:52:19 -07004055 ol_txrx_stats_display(vdev->pdev,
4056 QDF_STATS_VERBOSITY_LEVEL_HIGH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004057 if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
4058#if defined(ENABLE_TXRX_PROT_ANALYZE)
4059 ol_txrx_prot_ans_display(vdev->pdev);
4060#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304061 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304062 "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004063#endif
4064 }
4065 if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
4066#if defined(ENABLE_RX_REORDER_TRACE)
4067 ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
4068#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304069 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304070 "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004071#endif
4072
4073 }
4074 return 0;
4075}
4076#endif
4077
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004078#ifdef currently_unused
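/**
 * ol_txrx_aggr_cfg() - configure A-MPDU/A-MSDU aggregation limits via HTT
 * @vdev: virtual device handle
 * @max_subfrms_ampdu: maximum number of sub-frames per A-MPDU
 * @max_subfrms_amsdu: maximum number of sub-frames per A-MSDU
 *
 * Return: result of the HTT aggregation config message
 */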
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004079int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
4080 int max_subfrms_ampdu, int max_subfrms_amsdu)
4081{
4082 return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
4083 max_subfrms_ampdu, max_subfrms_amsdu);
4084}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004085#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004086
4087#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4088void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
4089{
4090 struct ol_txrx_vdev_t *vdev;
4091
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304092 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004093 "%*s%s:\n", indent, " ", "txrx pdev");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304094 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004095 "%*spdev object: %pK", indent + 4, " ", pdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304096 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004097 "%*svdev list:", indent + 4, " ");
4098 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304099 ol_txrx_vdev_display(vdev, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004100 }
4101 ol_txrx_peer_find_display(pdev, indent + 4);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304102 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004103 "%*stx desc pool: %d elems @ %pK", indent + 4, " ",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004104 pdev->tx_desc.pool_size, pdev->tx_desc.array);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304105 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004106 htt_display(pdev->htt_pdev, indent);
4107}
4108
4109void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
4110{
4111 struct ol_txrx_peer_t *peer;
4112
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304113 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004114 "%*stxrx vdev: %pK\n", indent, " ", vdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304115 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004116 "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304117 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004118 "%*sMAC addr: %d:%d:%d:%d:%d:%d",
4119 indent + 4, " ",
4120 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
4121 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
4122 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304123 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004124 "%*speer list:", indent + 4, " ");
4125 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304126 ol_txrx_peer_display(peer, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004127 }
4128}
4129
4130void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
4131{
4132 int i;
4133
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304134 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004135 "%*stxrx peer: %pK", indent, " ", peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004136 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
4137 if (peer->peer_ids[i] != HTT_INVALID_PEER) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304138 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004139 "%*sID: %d", indent + 4, " ",
4140 peer->peer_ids[i]);
4141 }
4142 }
4143}
4144#endif /* TXRX_DEBUG_LEVEL */
4145
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004146/**
4147 * ol_txrx_stats() - write ol layer queue-pause stats into the given buffer
4148 * @vdev_id: vdev_id
4149 * @buffer: pointer to buffer
4150 * @buf_len: length of the buffer
4151 *
4152 * Return: length of string
4153 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004154static int
Yun Parkeaea8632017-04-09 09:53:45 -07004155ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004156{
4157 uint32_t len = 0;
4158
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004159 struct ol_txrx_vdev_t *vdev =
4160 (struct ol_txrx_vdev_t *)
4161 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004162
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004163 if (!vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304164 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304165 "%s: vdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004166 snprintf(buffer, buf_len, "vdev not found");
4167 return len;
4168 }
4169
4170 len = scnprintf(buffer, buf_len,
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004171 "\n\nTXRX stats:\nllQueue State : %s\npause %u unpause %u\noverflow %u\nllQueue timer state : %s",
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304172 ((vdev->ll_pause.is_q_paused == false) ?
4173 "UNPAUSED" : "PAUSED"),
4174 vdev->ll_pause.q_pause_cnt,
4175 vdev->ll_pause.q_unpause_cnt,
4176 vdev->ll_pause.q_overflow_cnt,
4177 ((vdev->ll_pause.is_q_timer_on == false)
4178 ? "NOT-RUNNING" : "RUNNING"));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004179 return len;
4180}
4181
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004182#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
4183/**
4184 * ol_txrx_disp_peer_cached_bufq_stats() - display peer cached_bufq stats
4185 * @peer: peer pointer
4186 *
4187 * Return: None
4188 */
4189static void ol_txrx_disp_peer_cached_bufq_stats(struct ol_txrx_peer_t *peer)
4190{
Nirav Shahe6194ac2018-07-13 11:04:41 +05304191 txrx_nofl_info("cached_bufq: curr %d drops %d hwm %d whatifs %d thresh %d",
4192 peer->bufq_info.curr,
4193 peer->bufq_info.dropped,
4194 peer->bufq_info.high_water_mark,
4195 peer->bufq_info.qdepth_no_thresh,
4196 peer->bufq_info.thresh);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004197}
4198
4199/**
4200 * ol_txrx_disp_peer_stats() - display peer stats
4201 * @pdev: pdev pointer
4202 *
4203 * Return: None
4204 */
4205static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4206{
	int i;
4207 struct ol_txrx_peer_t *peer;
4208 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
4209
4210 if (osc && hif_is_load_or_unload_in_progress(HIF_GET_SOFTC(osc)))
4211 return;
4212
4213 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
Manjunathappa Prakasha4272ab2018-09-17 11:39:44 -07004214 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004215 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4216 peer = pdev->local_peer_ids.map[i];
Frank Liu4362e462018-01-16 11:51:55 +08004217 if (peer) {
Mohit Khannab7bec722017-11-10 11:43:44 -08004218 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Frank Liu4362e462018-01-16 11:51:55 +08004219 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004220 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Manjunathappa Prakasha4272ab2018-09-17 11:39:44 -07004221 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004222
4223 if (peer) {
Nirav Shahe6194ac2018-07-13 11:04:41 +05304224 txrx_nofl_info("stats: peer 0x%pK local peer id %d",
4225 peer, i);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004226 ol_txrx_disp_peer_cached_bufq_stats(peer);
Mohit Khannab7bec722017-11-10 11:43:44 -08004227 ol_txrx_peer_release_ref(peer,
4228 PEER_DEBUG_ID_OL_INTERNAL);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004229 }
4230 }
4231}
4232#else
4233static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4234{
Nirav Shahe6194ac2018-07-13 11:04:41 +05304235 txrx_nofl_info("peer stats not supported w/o QCA_SUPPORT_TXRX_LOCAL_PEER_ID");
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004236}
4237#endif
4238
Mohit Khannaca4173b2017-09-12 21:52:19 -07004239void ol_txrx_stats_display(ol_txrx_pdev_handle pdev,
4240 enum qdf_stats_verbosity_level level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004241{
Mohit Khannaca4173b2017-09-12 21:52:19 -07004242 u64 tx_dropped =
4243 pdev->stats.pub.tx.dropped.download_fail.pkts
4244 + pdev->stats.pub.tx.dropped.target_discard.pkts
4245 + pdev->stats.pub.tx.dropped.no_ack.pkts
Chaoli Zhou74af4172019-08-12 14:54:17 +08004246 + pdev->stats.pub.tx.dropped.target_drop.pkts
Mohit Khannaca4173b2017-09-12 21:52:19 -07004247 + pdev->stats.pub.tx.dropped.others.pkts;
4248
4249 if (level == QDF_STATS_VERBOSITY_LEVEL_LOW) {
Chaoli Zhou74af4172019-08-12 14:54:17 +08004250 txrx_nofl_dbg("STATS |%u %u|TX: %lld tso %lld ok %lld drops(%u-%lld %u-%lld %u-%lld %u-%lld ?-%lld hR-%lld)|RX: %lld drops(E %lld PI %lld ME %lld) fwd(S %d F %d SF %d)|",
Nirav Shahe6194ac2018-07-13 11:04:41 +05304251 pdev->tx_desc.num_free,
4252 pdev->tx_desc.pool_size,
4253 pdev->stats.pub.tx.from_stack.pkts,
4254 pdev->stats.pub.tx.tso.tso_pkts.pkts,
4255 pdev->stats.pub.tx.delivered.pkts,
4256 htt_tx_status_download_fail,
4257 pdev->stats.pub.tx.dropped.download_fail.pkts,
4258 htt_tx_status_discard,
4259 pdev->stats.pub.tx.dropped.
4260 target_discard.pkts,
4261 htt_tx_status_no_ack,
4262 pdev->stats.pub.tx.dropped.no_ack.pkts,
Chaoli Zhou74af4172019-08-12 14:54:17 +08004263 htt_tx_status_drop,
4264 pdev->stats.pub.tx.dropped.target_drop.pkts,
Nirav Shahe6194ac2018-07-13 11:04:41 +05304265 pdev->stats.pub.tx.dropped.others.pkts,
4266 pdev->stats.pub.tx.dropped.host_reject.pkts,
4267 pdev->stats.pub.rx.delivered.pkts,
4268 pdev->stats.pub.rx.dropped_err.pkts,
4269 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4270 pdev->stats.pub.rx.dropped_mic_err.pkts,
4271 pdev->stats.pub.rx.intra_bss_fwd.
4272 packets_stack,
4273 pdev->stats.pub.rx.intra_bss_fwd.
4274 packets_fwd,
4275 pdev->stats.pub.rx.intra_bss_fwd.
4276 packets_stack_n_fwd);
Mohit Khannaca4173b2017-09-12 21:52:19 -07004277 return;
4278 }
4279
Nirav Shahe6194ac2018-07-13 11:04:41 +05304280 txrx_nofl_info("TX PATH Statistics:");
4281 txrx_nofl_info("sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
4282 pdev->stats.pub.tx.from_stack.pkts,
4283 pdev->stats.pub.tx.from_stack.bytes,
4284 pdev->stats.pub.tx.dropped.host_reject.pkts,
4285 pdev->stats.pub.tx.dropped.host_reject.bytes,
4286 tx_dropped,
4287 pdev->stats.pub.tx.dropped.download_fail.bytes
4288 + pdev->stats.pub.tx.dropped.target_discard.bytes
Chaoli Zhou74af4172019-08-12 14:54:17 +08004289 + pdev->stats.pub.tx.dropped.target_drop.bytes
Nirav Shahe6194ac2018-07-13 11:04:41 +05304290 + pdev->stats.pub.tx.dropped.no_ack.bytes);
Chaoli Zhou74af4172019-08-12 14:54:17 +08004291 txrx_nofl_info("successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B),target drop: %lld (%lld B), others: %lld (%lld B)",
Nirav Shahe6194ac2018-07-13 11:04:41 +05304292 pdev->stats.pub.tx.delivered.pkts,
4293 pdev->stats.pub.tx.delivered.bytes,
4294 pdev->stats.pub.tx.dropped.download_fail.pkts,
4295 pdev->stats.pub.tx.dropped.download_fail.bytes,
4296 pdev->stats.pub.tx.dropped.target_discard.pkts,
4297 pdev->stats.pub.tx.dropped.target_discard.bytes,
4298 pdev->stats.pub.tx.dropped.no_ack.pkts,
4299 pdev->stats.pub.tx.dropped.no_ack.bytes,
Chaoli Zhou74af4172019-08-12 14:54:17 +08004300 pdev->stats.pub.tx.dropped.target_drop.pkts,
4301 pdev->stats.pub.tx.dropped.target_drop.bytes,
Nirav Shahe6194ac2018-07-13 11:04:41 +05304302 pdev->stats.pub.tx.dropped.others.pkts,
4303 pdev->stats.pub.tx.dropped.others.bytes);
4304 txrx_nofl_info("Tx completions per HTT message:\n"
4305 "Single Packet %d\n"
4306 " 2-10 Packets %d\n"
4307 "11-20 Packets %d\n"
4308 "21-30 Packets %d\n"
4309 "31-40 Packets %d\n"
4310 "41-50 Packets %d\n"
4311 "51-60 Packets %d\n"
4312 " 60+ Packets %d\n",
4313 pdev->stats.pub.tx.comp_histogram.pkts_1,
4314 pdev->stats.pub.tx.comp_histogram.pkts_2_10,
4315 pdev->stats.pub.tx.comp_histogram.pkts_11_20,
4316 pdev->stats.pub.tx.comp_histogram.pkts_21_30,
4317 pdev->stats.pub.tx.comp_histogram.pkts_31_40,
4318 pdev->stats.pub.tx.comp_histogram.pkts_41_50,
4319 pdev->stats.pub.tx.comp_histogram.pkts_51_60,
4320 pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
Nirav Shahda008342016-05-17 18:50:40 +05304321
Nirav Shahe6194ac2018-07-13 11:04:41 +05304322 txrx_nofl_info("RX PATH Statistics:");
4323 txrx_nofl_info("%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
4324 "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
4325 "msdus with frag_ind: %d msdus with offload_ind: %d",
4326 pdev->stats.priv.rx.normal.ppdus,
4327 pdev->stats.priv.rx.normal.mpdus,
4328 pdev->stats.pub.rx.delivered.pkts,
4329 pdev->stats.pub.rx.delivered.bytes,
4330 pdev->stats.pub.rx.dropped_err.pkts,
4331 pdev->stats.pub.rx.dropped_err.bytes,
4332 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4333 pdev->stats.pub.rx.dropped_peer_invalid.bytes,
4334 pdev->stats.pub.rx.dropped_mic_err.pkts,
4335 pdev->stats.pub.rx.dropped_mic_err.bytes,
4336 pdev->stats.pub.rx.msdus_with_frag_ind,
4337 pdev->stats.pub.rx.msdus_with_offload_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004338
Nirav Shahe6194ac2018-07-13 11:04:41 +05304339 txrx_nofl_info(" fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
4340 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4341 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4342 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304343
Nirav Shahe6194ac2018-07-13 11:04:41 +05304344 txrx_nofl_info("packets per HTT message:\n"
4345 "Single Packet %d\n"
4346 " 2-10 Packets %d\n"
4347 "11-20 Packets %d\n"
4348 "21-30 Packets %d\n"
4349 "31-40 Packets %d\n"
4350 "41-50 Packets %d\n"
4351 "51-60 Packets %d\n"
4352 " 60+ Packets %d\n",
4353 pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4354 pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4355 pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4356 pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4357 pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4358 pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4359 pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4360 pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004361
4362 ol_txrx_disp_peer_stats(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004363}
4364
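/**
 * ol_txrx_stats_clear() - zero out all pdev txrx stats
 * @pdev: txrx pdev handle
 *
 * Return: None
 */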
4365void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4366{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304367 qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004368}
4369
4370#if defined(ENABLE_TXRX_PROT_ANALYZE)
4371
4372void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4373{
4374 ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4375 ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4376}
4377
4378#endif /* ENABLE_TXRX_PROT_ANALYZE */
4379
4380#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
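/**
 * ol_txrx_peer_rssi() - return the peer's last-known RSSI in dBm
 * @peer: peer handle
 *
 * Return: RSSI in dBm, or OL_TXRX_RSSI_INVALID if no valid measurement
 */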
4381int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4382{
4383 return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4384 OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4385}
4386#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4387
4388#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
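/**
 * ol_txrx_peer_stats_copy() - copy a peer's stats under the peer stats lock
 * @pdev: txrx pdev handle
 * @peer: peer whose stats are to be copied
 * @stats: caller-provided buffer to receive the stats
 *
 * Return: A_OK
 */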
4389A_STATUS
4390ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4391 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4392{
Anurag Chouhanc5548422016-02-24 18:33:27 +05304393 qdf_assert(pdev && peer && stats);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304394 qdf_spin_lock_bh(&pdev->peer_stat_mutex);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304395 qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304396 qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004397 return A_OK;
4398}
4399#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4400
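/**
 * ol_vdev_rx_set_intrabss_fwd() - set the intra-BSS rx forwarding policy
 * @pvdev: virtual device handle
 * @val: true to disable intra-BSS forwarding on the vdev, false to enable it
 *
 * Return: None
 */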
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004401static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *pvdev, bool val)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004402{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004403 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004404
Jeff Johnson6795c3a2019-03-18 13:43:04 -07004405 if (!vdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004406 return;
4407
4408 vdev->disable_intrabss_fwd = val;
4409}
4410
Nirav Shahc657ef52016-07-26 14:22:38 +05304411/**
4412 * ol_txrx_update_mac_id() - update mac_id for vdev
4413 * @vdev_id: vdev id
4414 * @mac_id: mac id
4415 *
4416 * Return: none
4417 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004418static void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id)
Nirav Shahc657ef52016-07-26 14:22:38 +05304419{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004420 struct ol_txrx_vdev_t *vdev =
4421 (struct ol_txrx_vdev_t *)
4422 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Nirav Shahc657ef52016-07-26 14:22:38 +05304423
Jeff Johnson6795c3a2019-03-18 13:43:04 -07004424 if (!vdev) {
Nirav Shahc657ef52016-07-26 14:22:38 +05304425 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4426 "%s: Invalid vdev_id %d", __func__, vdev_id);
4427 return;
4428 }
4429 vdev->mac_id = mac_id;
4430}
4431
Alok Kumar75355aa2018-03-19 17:32:58 +05304432/**
4433 * ol_txrx_get_tx_ack_stats() - get tx ack count
Sravan Kumar Kairam53b43e12019-04-19 22:13:09 +05304434 * @pdev: pdev reference
Alok Kumar75355aa2018-03-19 17:32:58 +05304435 * @vdev_id: vdev_id
4436 *
4437 * Return: tx ack count
4438 */
Sravan Kumar Kairam53b43e12019-04-19 22:13:09 +05304439static uint32_t ol_txrx_get_tx_ack_stats(struct cdp_pdev *pdev,
4440 uint8_t vdev_id)
Alok Kumar75355aa2018-03-19 17:32:58 +05304441{
4442 struct ol_txrx_vdev_t *vdev =
4443 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
4444 if (!vdev) {
4445 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4446 "%s: Invalid vdev_id %d", __func__, vdev_id);
4447 return 0;
4448 }
4449 return vdev->txrx_stats.txack_success;
4450}
4451
Leo Chang8e073612015-11-13 10:55:34 -08004452/**
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004453 * ol_txrx_display_stats() - display OL TXRX stats
4454 * @value: module id for which stats need to be displayed
Nirav Shahda008342016-05-17 18:50:40 +05304455 *
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004456 * Return: status
Nirav Shahda008342016-05-17 18:50:40 +05304457 */
Mohit Khannaca4173b2017-09-12 21:52:19 -07004458static QDF_STATUS
4459ol_txrx_display_stats(void *soc, uint16_t value,
4460 enum qdf_stats_verbosity_level verb_level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004461{
4462 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004463 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004464
Anurag Chouhan6d760662016-02-20 16:05:43 +05304465 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004466 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304467 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304468 "%s: pdev is NULL", __func__);
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004469 return QDF_STATUS_E_NULL_VALUE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004470 }
4471
4472 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004473 case CDP_TXRX_PATH_STATS:
Mohit Khannaca4173b2017-09-12 21:52:19 -07004474 ol_txrx_stats_display(pdev, verb_level);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004475 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004476 case CDP_TXRX_TSO_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004477 ol_txrx_stats_display_tso(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004478 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004479 case CDP_DUMP_TX_FLOW_POOL_INFO:
Nirav Shahaa34cbb2019-07-03 10:32:04 +05304480 if (verb_level == QDF_STATS_VERBOSITY_LEVEL_LOW)
4481 ol_tx_dump_flow_pool_info_compact((void *)pdev);
4482 else
4483 ol_tx_dump_flow_pool_info((void *)pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004484 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004485 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304486 qdf_nbuf_tx_desc_count_display();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004487 break;
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -08004488 case CDP_WLAN_RX_BUF_DEBUG_STATS:
4489 htt_display_rx_buf_debug(pdev->htt_pdev);
4490 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304491#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004492 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304493 ol_tx_sched_cur_state_display(pdev);
4494 ol_tx_sched_stats_display(pdev);
4495 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004496 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304497 ol_tx_queue_log_display(pdev);
4498 break;
4499#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004500 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304501 ol_tx_dump_group_credit_stats(pdev);
4502 break;
4503#endif
4504
4505#ifdef DEBUG_HL_LOGGING
Nirav Shaheb017be2018-02-15 11:20:58 +05304506 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304507 htt_dump_bundle_stats(pdev->htt_pdev);
4508 break;
4509#endif
4510#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004511 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004512 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004513 break;
4514 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004515 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004516}
4517
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004518/**
4519 * ol_txrx_clear_stats() - Clear OL TXRX stats
Venkata Sharath Chandra Manchalacf572622019-07-29 11:50:57 -07004520 * @soc: ol soc handle
4521 * @value: module id for which stats need to be cleared
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004522 *
Venkata Sharath Chandra Manchalacf572622019-07-29 11:50:57 -07004523 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL on failure
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004524 */
Venkata Sharath Chandra Manchalacf572622019-07-29 11:50:57 -07004525static QDF_STATUS ol_txrx_clear_stats(struct cdp_soc *soc,
4526 uint8_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004527{
4528 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004529 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004530
Anurag Chouhan6d760662016-02-20 16:05:43 +05304531 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004532 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304533 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304534 "%s: pdev is NULL", __func__);
Venkata Sharath Chandra Manchalacf572622019-07-29 11:50:57 -07004535 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004536 }
4537
4538 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004539 case CDP_TXRX_PATH_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004540 ol_txrx_stats_clear(pdev);
4541 break;
Yun Park1027e8c2017-10-13 15:17:37 -07004542 case CDP_TXRX_TSO_STATS:
4543 ol_txrx_tso_stats_clear(pdev);
4544 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004545 case CDP_DUMP_TX_FLOW_POOL_INFO:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004546 ol_tx_clear_flow_pool_stats();
4547 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004548 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304549 qdf_nbuf_tx_desc_count_clear();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004550 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304551#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004552 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304553 ol_tx_sched_stats_clear(pdev);
4554 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004555 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304556 ol_tx_queue_log_clear(pdev);
4557 break;
4558#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004559 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304560 ol_tx_clear_group_credit_stats(pdev);
4561 break;
4562#endif
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004563 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304564 htt_clear_bundle_stats(pdev->htt_pdev);
4565 break;
4566#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004567 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004568 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004569 break;
4570 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004571
Venkata Sharath Chandra Manchalacf572622019-07-29 11:50:57 -07004572 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004573}
4574
4575/**
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004576 * ol_txrx_drop_nbuf_list() - drop an nbuf list
4577 * @buf_list: buffer list to be dropped
4578 *
4579 * Return: int (number of bufs dropped)
4580 */
4581static inline int ol_txrx_drop_nbuf_list(qdf_nbuf_t buf_list)
4582{
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05304583 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
4584 ol_txrx_pdev_handle pdev;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004585 int num_dropped = 0;
4586 qdf_nbuf_t buf, next_buf;
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05304587
4588 if (qdf_unlikely(!soc)) {
4589 ol_txrx_err("soc is NULL");
4590 return 0;
4591 }
4592
4593 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
4594 if (!pdev) {
4595 ol_txrx_err("pdev is NULL");
4596 return 0;
4597 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004598
4599 buf = buf_list;
4600 while (buf) {
Himanshu Agarwaldd2196a2017-07-31 11:38:14 +05304601 QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004602 next_buf = qdf_nbuf_queue_next(buf);
4603 if (pdev)
4604 TXRX_STATS_MSDU_INCR(pdev,
4605 rx.dropped_peer_invalid, buf);
4606 qdf_nbuf_free(buf);
4607 buf = next_buf;
4608 num_dropped++;
4609 }
4610 return num_dropped;
4611}
4612
4613/**
Alok Kumarea3b23b2019-02-28 15:32:10 +05304614 * ol_rx_data_handler() - data rx handler
4615 * @pdev: dev handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004616 * @buf_list: buffer list
Nirav Shah36a87bf2016-02-22 12:38:46 +05304617 * @staid: Station id
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004618 *
4619 * Return: None
4620 */
Alok Kumarea3b23b2019-02-28 15:32:10 +05304621static void ol_rx_data_handler(struct ol_txrx_pdev_t *pdev,
4622 qdf_nbuf_t buf_list, uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004623{
Mohit Khanna0696eef2016-04-14 16:14:08 -07004624 void *osif_dev;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004625 uint8_t drop_count = 0;
Nirav Shahcbc6d722016-03-01 16:24:53 +05304626 qdf_nbuf_t buf, next_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304627 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004628 ol_txrx_rx_fp data_rx = NULL;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304629 struct ol_txrx_peer_t *peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004630
Jeff Johnsondac9e382017-09-24 10:36:08 -07004631 if (qdf_unlikely(!pdev))
Nirav Shah36a87bf2016-02-22 12:38:46 +05304632 goto free_buf;
4633
4634 /* Do not use peer directly. Derive peer from staid to
4635 * make sure that peer is valid.
4636 */
Jingxiang Ge3badb982018-01-02 17:39:01 +08004637 peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev,
4638 staid, PEER_DEBUG_ID_OL_RX_THREAD);
Nirav Shah36a87bf2016-02-22 12:38:46 +05304639 if (!peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004640 goto free_buf;
4641
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304642 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre50141c52016-04-07 13:15:29 -07004643 if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
4644 !peer->vdev->rx)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304645 qdf_spin_unlock_bh(&peer->peer_info_lock);
Jingxiang Ge9f297062018-01-24 13:31:31 +08004646 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004647 goto free_buf;
4648 }
Dhanashri Atre182b0272016-02-17 15:35:07 -08004649
4650 data_rx = peer->vdev->rx;
Mohit Khanna0696eef2016-04-14 16:14:08 -07004651 osif_dev = peer->vdev->osif_dev;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304652 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004653
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004654 qdf_spin_lock_bh(&peer->bufq_info.bufq_lock);
4655 if (!list_empty(&peer->bufq_info.cached_bufq)) {
4656 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004657 /* Flush the cached frames to HDD before passing new rx frame */
4658 ol_txrx_flush_rx_frames(peer, 0);
4659 } else
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004660 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004661
Jingxiang Ge3badb982018-01-02 17:39:01 +08004662 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
4663
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004664 buf = buf_list;
4665 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304666 next_buf = qdf_nbuf_queue_next(buf);
4667 qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
Mohit Khanna0696eef2016-04-14 16:14:08 -07004668 ret = data_rx(osif_dev, buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304669 if (ret != QDF_STATUS_SUCCESS) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304670 ol_txrx_err("Frame Rx to HDD failed");
Nirav Shah6a4eee62016-04-25 10:15:04 +05304671 if (pdev)
4672 TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304673 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004674 }
4675 buf = next_buf;
4676 }
4677 return;
4678
4679free_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004680 drop_count = ol_txrx_drop_nbuf_list(buf_list);
Nirav Shah7c8c1712018-09-10 16:01:31 +05304681 ol_txrx_warn("Dropped frames %u", drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004682}
4683
Alok Kumarea3b23b2019-02-28 15:32:10 +05304684/**
4685 * ol_rx_data_cb() - data rx callback
4686 * @context: dev handle
4687 * @buf_list: buffer list
4688 * @staid: Station id
4689 *
4690 * Return: None
4691 */
4692static inline void
4693ol_rx_data_cb(void *context, qdf_nbuf_t buf_list, uint16_t staid)
4694{
4695 struct ol_txrx_pdev_t *pdev = context;
4696
4697 ol_rx_data_handler(pdev, buf_list, staid);
4698}
4699
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004700/* print for every 16th packet */
4701#define OL_TXRX_PRINT_RATE_LIMIT_THRESH 0x0f
4702struct ol_rx_cached_buf *cache_buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304703
4704/** helper function to drop packets
4705 * Note: caller must hold the cached bufq lock before invoking
4706 * this function. Also, it assumes that the pointers passed in
4707 * are valid (non-NULL)
4708 */
4709static inline void ol_txrx_drop_frames(
4710 struct ol_txrx_cached_bufq_t *bufqi,
4711 qdf_nbuf_t rx_buf_list)
4712{
4713 uint32_t dropped = ol_txrx_drop_nbuf_list(rx_buf_list);
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004714
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304715 bufqi->dropped += dropped;
4716 bufqi->qdepth_no_thresh += dropped;
4717
4718 if (bufqi->qdepth_no_thresh > bufqi->high_water_mark)
4719 bufqi->high_water_mark = bufqi->qdepth_no_thresh;
4720}
4721
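/**
 * ol_txrx_enqueue_rx_frames() - cache rx frames for a not-yet-registered peer
 * @peer: peer for which the frames are cached
 * @bufqi: peer's cached buffer queue info
 * @rx_buf_list: list of rx frames to cache
 *
 * Queues the frames until the OS interface for the peer is registered;
 * frames are dropped once the queue threshold is reached or if the peer
 * becomes invalid.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT if frames dropped
 */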
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004722static QDF_STATUS ol_txrx_enqueue_rx_frames(
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304723 struct ol_txrx_peer_t *peer,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004724 struct ol_txrx_cached_bufq_t *bufqi,
4725 qdf_nbuf_t rx_buf_list)
4726{
4727 struct ol_rx_cached_buf *cache_buf;
4728 qdf_nbuf_t buf, next_buf;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004729 static uint32_t count;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004730
4731 if ((count++ & OL_TXRX_PRINT_RATE_LIMIT_THRESH) == 0)
4732 ol_txrx_info_high(
4733 "Data on the peer before it is registered bufq->curr %d bufq->drops %d",
4734 bufqi->curr, bufqi->dropped);
4735
4736 qdf_spin_lock_bh(&bufqi->bufq_lock);
4737 if (bufqi->curr >= bufqi->thresh) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304738 ol_txrx_drop_frames(bufqi, rx_buf_list);
4739 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4740 return QDF_STATUS_E_FAULT;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004741 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004742 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4743
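	/*
	 * Descriptive note (added by the editor): wrap each nbuf in an
	 * ol_rx_cached_buf and append it to the peer's cached_bufq under
	 * bufq_lock. If the allocation fails the nbuf is dropped; if the
	 * peer became invalid, the remainder of the list is dropped via
	 * ol_txrx_drop_frames().
	 */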
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004744 buf = rx_buf_list;
4745 while (buf) {
Sravan Kumar Kairamdd5a74a2019-01-11 17:32:49 +05304746 QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004747 next_buf = qdf_nbuf_queue_next(buf);
4748 cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
4749 if (!cache_buf) {
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004750 qdf_nbuf_free(buf);
4751 } else {
4752 /* Add NULL terminator */
4753 qdf_nbuf_set_next(buf, NULL);
4754 cache_buf->buf = buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304755 if (peer && peer->valid) {
4756 qdf_spin_lock_bh(&bufqi->bufq_lock);
4757 list_add_tail(&cache_buf->list,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004758 &bufqi->cached_bufq);
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304759 bufqi->curr++;
4760 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4761 } else {
4762 qdf_mem_free(cache_buf);
4763 rx_buf_list = buf;
4764 qdf_nbuf_set_next(rx_buf_list, next_buf);
4765 qdf_spin_lock_bh(&bufqi->bufq_lock);
4766 ol_txrx_drop_frames(bufqi, rx_buf_list);
4767 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4768 return QDF_STATUS_E_FAULT;
4769 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004770 }
4771 buf = next_buf;
4772 }
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304773 return QDF_STATUS_SUCCESS;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004774}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004775/**
4776 * ol_rx_data_process() - process rx frame
4777 * @peer: peer
4778 * @rx_buf_list: rx buffer list
4779 *
4780 * Return: None
4781 */
4782void ol_rx_data_process(struct ol_txrx_peer_t *peer,
Nirav Shahcbc6d722016-03-01 16:24:53 +05304783 qdf_nbuf_t rx_buf_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004784{
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05304785 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
4786 ol_txrx_pdev_handle pdev;
Yun Parkeaea8632017-04-09 09:53:45 -07004787 /*
 4788 * The firmware data path active response is delivered via the shim
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004789 * RX thread; T2H messages run in SIRQ context, and the IPA kernel
Yun Parkeaea8632017-04-09 09:53:45 -07004790 * module APIs must not be called from SIRQ context.
4791 */
Dhanashri Atre182b0272016-02-17 15:35:07 -08004792 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004793
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05304794 if (qdf_unlikely(!soc)) {
4795 ol_txrx_err("soc is NULL");
4796 goto drop_rx_buf;
4797 }
4798
4799 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004800 if ((!peer) || (!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304801 ol_txrx_err("peer/pdev is NULL");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004802 goto drop_rx_buf;
4803 }
4804
Dhanashri Atre182b0272016-02-17 15:35:07 -08004805 qdf_assert(peer->vdev);
4806
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304807 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004808 if (peer->state >= OL_TXRX_PEER_STATE_CONN)
Dhanashri Atre182b0272016-02-17 15:35:07 -08004809 data_rx = peer->vdev->rx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304810 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004811
4812 /*
4813 * If there is a data frame from peer before the peer is
4814 * registered for data service, enqueue them on to pending queue
4815 * which will be flushed to HDD once that station is registered.
4816 */
4817 if (!data_rx) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304818 if (ol_txrx_enqueue_rx_frames(peer, &peer->bufq_info,
4819 rx_buf_list)
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004820 != QDF_STATUS_SUCCESS)
Poddar, Siddarth07eebf32017-04-19 12:40:26 +05304821 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4822 "%s: failed to enqueue rx frm to cached_bufq",
4823 __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004824 } else {
4825#ifdef QCA_CONFIG_SMP
4826 /*
 4827 * On an SMP kernel, schedule the rx thread so that multiple
 4828 * cores can be used.
4829 */
4830 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
Alok Kumarea3b23b2019-02-28 15:32:10 +05304831 ol_rx_data_handler(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004832 } else {
4833 p_cds_sched_context sched_ctx =
4834 get_cds_sched_ctxt();
4835 struct cds_ol_rx_pkt *pkt;
4836
4837 if (unlikely(!sched_ctx))
4838 goto drop_rx_buf;
4839
4840 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
Alok Kumar3a6327d2018-08-06 17:28:25 +05304841 if (!pkt)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004842 goto drop_rx_buf;
Alok Kumar3a6327d2018-08-06 17:28:25 +05304843
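			/*
			 * Descriptive note (added by the editor): the frame
			 * list is handed to the rx thread here;
			 * ol_rx_data_cb() later runs in thread context and
			 * calls ol_rx_data_handler() for this peer's
			 * local_id.
			 */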
Alok Kumarea3b23b2019-02-28 15:32:10 +05304844 pkt->callback = ol_rx_data_cb;
4845 pkt->context = pdev;
4846 pkt->Rxpkt = rx_buf_list;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004847 pkt->staId = peer->local_id;
4848 cds_indicate_rxpkt(sched_ctx, pkt);
4849 }
4850#else /* QCA_CONFIG_SMP */
Alok Kumarea3b23b2019-02-28 15:32:10 +05304851 ol_rx_data_handler(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004852#endif /* QCA_CONFIG_SMP */
4853 }
4854
4855 return;
4856
4857drop_rx_buf:
Alok Kumar3a6327d2018-08-06 17:28:25 +05304858 ol_txrx_drop_nbuf_list(rx_buf_list);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004859}
4860
4861/**
4862 * ol_txrx_register_peer() - register peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004863 * @sta_desc: sta descriptor
4864 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05304865 * Return: QDF Status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004866 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004867static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004868{
4869 struct ol_txrx_peer_t *peer;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304870 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004871 union ol_txrx_peer_update_param_t param;
4872 struct privacy_exemption privacy_filter;
Rakshith Suresh Patkar55e08c02019-07-26 11:14:06 +05304873 uint8_t peer_id;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004874
4875 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304876 ol_txrx_err("Pdev is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304877 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004878 }
4879
Rakshith Suresh Patkar55e08c02019-07-26 11:14:06 +05304880 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
4881 sta_desc->peer_addr.bytes,
4882 &peer_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004883
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004884 if (!peer)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304885 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004886
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304887 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004888 peer->state = OL_TXRX_PEER_STATE_CONN;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304889 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004890
4891 param.qos_capable = sta_desc->is_qos_enabled;
4892 ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
4893 ol_txrx_peer_update_qos_capable);
4894
4895 if (sta_desc->is_wapi_supported) {
4896 /*Privacy filter to accept unencrypted WAI frames */
4897 privacy_filter.ether_type = ETHERTYPE_WAI;
4898 privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
4899 privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
4900 ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
4901 }
4902
4903 ol_txrx_flush_rx_frames(peer, 0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304904 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004905}
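/*
 * Descriptive note (added by the editor): registration above moves the peer
 * to OL_TXRX_PEER_STATE_CONN, pushes the QoS capability through
 * ol_txrx_peer_update(), installs a WAI privacy filter when WAPI is in use,
 * and finally delivers any frames cached while the peer was still
 * unregistered by calling ol_txrx_flush_rx_frames().
 */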
4906
4907/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004908 * ol_txrx_register_ocb_peer - Function to register the OCB peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004909 * @mac_addr: MAC address of the self peer
4910 * @peer_id: Pointer to the peer ID
4911 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304912 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004913 */
Jeff Johnson382bce02017-09-01 14:21:07 -07004914static QDF_STATUS ol_txrx_register_ocb_peer(uint8_t *mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004915 uint8_t *peer_id)
4916{
4917 ol_txrx_pdev_handle pdev;
4918 ol_txrx_peer_handle peer;
4919
Anurag Chouhan6d760662016-02-20 16:05:43 +05304920 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004921 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05304922 ol_txrx_err("Unable to find pdev!");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304923 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004924 }
4925
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004926 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
4927 mac_addr, peer_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004928 if (!peer) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05304929 ol_txrx_err("Unable to find OCB peer!");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304930 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004931 }
4932
4933 ol_txrx_set_ocb_peer(pdev, peer);
4934
4935 /* Set peer state to connected */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004936 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004937 OL_TXRX_PEER_STATE_AUTH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004938
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304939 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004940}
4941
4942/**
4943 * ol_txrx_set_ocb_peer - Function to store the OCB peer
4944 * @pdev: Handle to the HTT instance
4945 * @peer: Pointer to the peer
4946 */
4947void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
4948 struct ol_txrx_peer_t *peer)
4949{
Jeff Johnson6795c3a2019-03-18 13:43:04 -07004950 if (!pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004951 return;
4952
4953 pdev->ocb_peer = peer;
4954 pdev->ocb_peer_valid = (NULL != peer);
4955}
4956
4957/**
4958 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
4959 * @pdev: Handle to the HTT instance
4960 * @peer: Pointer to the returned peer
4961 *
4962 * Return: true if the peer is valid, false if not
4963 */
4964bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
4965 struct ol_txrx_peer_t **peer)
4966{
4967 int rc;
4968
Jeff Johnson6795c3a2019-03-18 13:43:04 -07004969 if ((!pdev) || (!peer)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004970 rc = false;
4971 goto exit;
4972 }
4973
4974 if (pdev->ocb_peer_valid) {
4975 *peer = pdev->ocb_peer;
4976 rc = true;
4977 } else {
4978 rc = false;
4979 }
4980
4981exit:
4982 return rc;
4983}
4984
hangtian72704802019-04-17 18:16:25 +08004985/**
4986 * ol_txrx_register_pause_cb() - register pause callback
4987 * @pause_cb: pause callback
4988 *
4989 * Return: QDF status
4990 */
4991static QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
4992 tx_pause_callback pause_cb)
4993{
4994 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
4995
4996 if (!pdev || !pause_cb) {
4997 ol_txrx_err("pdev or pause_cb is NULL");
4998 return QDF_STATUS_E_INVAL;
4999 }
5000 pdev->pause_cb = pause_cb;
5001 return QDF_STATUS_SUCCESS;
5002}
5003
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005004#ifdef RECEIVE_OFFLOAD
5005/**
5006 * ol_txrx_offld_flush_handler() - offld flush handler
5007 * @context: dev handle
5008 * @rxpkt: rx data
5009 * @staid: station id
5010 *
5011 * This function handles an offld flush indication.
5012 * If the rx thread is enabled, it will be invoked by the rx
5013 * thread else it will be called in the tasklet context
5014 *
5015 * Return: none
5016 */
5017static void ol_txrx_offld_flush_handler(void *context,
Alok Kumarea3b23b2019-02-28 15:32:10 +05305018 qdf_nbuf_t rxpkt,
5019 uint16_t staid)
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005020{
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05305021 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5022 ol_txrx_pdev_handle pdev;
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005023
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05305024 if (qdf_unlikely(!soc)) {
5025 ol_txrx_err("Invalid soc context");
5026 qdf_assert(0);
5027 return;
5028 }
5029
5030 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005031 if (qdf_unlikely(!pdev)) {
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05305032 ol_txrx_err("Invalid pdev context");
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005033 qdf_assert(0);
5034 return;
5035 }
5036
5037 if (pdev->offld_flush_cb)
5038 pdev->offld_flush_cb(context);
5039 else
5040 ol_txrx_err("offld_flush_cb NULL");
5041}
5042
5043/**
5044 * ol_txrx_offld_flush() - offld flush callback
5045 * @data: opaque data pointer
5046 *
5047 * This is the callback registered with CE to trigger
5048 * an offld flush
5049 *
5050 * Return: none
5051 */
5052static void ol_txrx_offld_flush(void *data)
5053{
5054 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
5055 struct cds_ol_rx_pkt *pkt;
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05305056 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5057 ol_txrx_pdev_handle pdev;
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005058
5059 if (qdf_unlikely(!sched_ctx))
5060 return;
5061
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05305062 if (qdf_unlikely(!soc)) {
5063 ol_txrx_err("soc is NULL");
5064 return;
5065 }
5066
5067 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
Amar Singhal4e855ad2018-09-04 12:19:00 -07005068 if (qdf_unlikely(!pdev)) {
5069 ol_txrx_err("TXRX module context is NULL");
5070 return;
5071 }
5072
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005073 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
5074 ol_txrx_offld_flush_handler(data, NULL, 0);
5075 } else {
5076 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
Alok Kumar3a6327d2018-08-06 17:28:25 +05305077 if (qdf_unlikely(!pkt))
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005078 return;
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005079
5080 pkt->callback = ol_txrx_offld_flush_handler;
5081 pkt->context = data;
5082 pkt->Rxpkt = NULL;
5083 pkt->staId = 0;
5084 cds_indicate_rxpkt(sched_ctx, pkt);
5085 }
5086}
5087
5088/**
5089 * ol_register_offld_flush_cb() - register the offld flush callback
 5090 * @offld_flush_cb: flush callback function
5092 *
5093 * Store the offld flush callback provided and in turn
5094 * register OL's offld flush handler with CE
5095 *
5096 * Return: none
5097 */
5098static void ol_register_offld_flush_cb(void (offld_flush_cb)(void *))
5099{
5100 struct hif_opaque_softc *hif_device;
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05305101 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5102 ol_txrx_pdev_handle pdev;
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005103
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05305104 if (qdf_unlikely(!soc)) {
5105 ol_txrx_err("soc NULL!");
5106 TXRX_ASSERT2(0);
5107 goto out;
5108 }
5109
5110 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
Jeff Johnson6795c3a2019-03-18 13:43:04 -07005111 if (!pdev) {
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005112 ol_txrx_err("pdev NULL!");
5113 TXRX_ASSERT2(0);
5114 goto out;
5115 }
Jeff Johnson6795c3a2019-03-18 13:43:04 -07005116 if (pdev->offld_flush_cb) {
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005117 ol_txrx_info("offld already initialised");
5118 if (pdev->offld_flush_cb != offld_flush_cb) {
5119 ol_txrx_err(
5120 "offld_flush_cb is differ to previously registered callback")
5121 TXRX_ASSERT2(0);
5122 goto out;
5123 }
5124 goto out;
5125 }
5126 pdev->offld_flush_cb = offld_flush_cb;
5127 hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5128
Jeff Johnson6795c3a2019-03-18 13:43:04 -07005129 if (qdf_unlikely(!hif_device)) {
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005130 ol_txrx_err("hif_device NULL!");
5131 qdf_assert(0);
5132 goto out;
5133 }
5134
5135 hif_offld_flush_cb_register(hif_device, ol_txrx_offld_flush);
5136
5137out:
5138 return;
5139}
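/*
 * Descriptive note (added by the editor): ol_register_offld_flush_cb() above
 * and ol_deregister_offld_flush_cb() below form a pair. Registration stores
 * the caller-provided flush callback in pdev->offld_flush_cb and hooks
 * ol_txrx_offld_flush() into the copy engine via
 * hif_offld_flush_cb_register(); deregistration undoes both in reverse
 * order.
 */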
5140
5141/**
5142 * ol_deregister_offld_flush_cb() - deregister the offld flush callback
5143 *
5144 * Remove the offld flush callback provided and in turn
5145 * deregister OL's offld flush handler with CE
5146 *
5147 * Return: none
5148 */
5149static void ol_deregister_offld_flush_cb(void)
5150{
5151 struct hif_opaque_softc *hif_device;
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05305152 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5153 ol_txrx_pdev_handle pdev;
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005154
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05305155 if (qdf_unlikely(!soc)) {
5156 ol_txrx_err("soc is NULL");
5157 return;
5158 }
5159
5160 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
Jeff Johnson6795c3a2019-03-18 13:43:04 -07005161 if (!pdev) {
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005162 ol_txrx_err("pdev NULL!");
5163 return;
5164 }
5165 hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5166
Jeff Johnson6795c3a2019-03-18 13:43:04 -07005167 if (qdf_unlikely(!hif_device)) {
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005168 ol_txrx_err("hif_device NULL!");
5169 qdf_assert(0);
5170 return;
5171 }
5172
5173 hif_offld_flush_cb_deregister(hif_device);
5174
5175 pdev->offld_flush_cb = NULL;
5176}
5177#endif /* RECEIVE_OFFLOAD */
5178
Poddar, Siddarth34872782017-08-10 14:08:51 +05305179/**
5180 * ol_register_data_stall_detect_cb() - register data stall callback
5181 * @data_stall_detect_callback: data stall callback function
5182 *
5183 *
5184 * Return: QDF_STATUS Enumeration
5185 */
5186static QDF_STATUS ol_register_data_stall_detect_cb(
5187 data_stall_detect_cb data_stall_detect_callback)
5188{
5189 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5190
Jeff Johnson6795c3a2019-03-18 13:43:04 -07005191 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05305192 ol_txrx_err("pdev NULL!");
Poddar, Siddarth34872782017-08-10 14:08:51 +05305193 return QDF_STATUS_E_INVAL;
5194 }
5195 pdev->data_stall_detect_callback = data_stall_detect_callback;
5196 return QDF_STATUS_SUCCESS;
5197}
5198
5199/**
5200 * ol_deregister_data_stall_detect_cb() - de-register data stall callback
5201 * @data_stall_detect_callback: data stall callback function
5202 *
5203 *
5204 * Return: QDF_STATUS Enumeration
5205 */
5206static QDF_STATUS ol_deregister_data_stall_detect_cb(
5207 data_stall_detect_cb data_stall_detect_callback)
5208{
5209 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5210
Jeff Johnson6795c3a2019-03-18 13:43:04 -07005211 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05305212 ol_txrx_err("pdev NULL!");
Poddar, Siddarth34872782017-08-10 14:08:51 +05305213 return QDF_STATUS_E_INVAL;
5214 }
5215 pdev->data_stall_detect_callback = NULL;
5216 return QDF_STATUS_SUCCESS;
5217}
5218
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305219/**
5220 * ol_txrx_post_data_stall_event() - post data stall event
5221 * @indicator: Module triggering data stall
5222 * @data_stall_type: data stall event type
5223 * @pdev_id: pdev id
5224 * @vdev_id_bitmap: vdev id bitmap
5225 * @recovery_type: data stall recovery type
5226 *
5227 * Return: None
5228 */
5229static void ol_txrx_post_data_stall_event(
5230 enum data_stall_log_event_indicator indicator,
5231 enum data_stall_log_event_type data_stall_type,
5232 uint32_t pdev_id, uint32_t vdev_id_bitmap,
5233 enum data_stall_log_recovery_type recovery_type)
5234{
5235 struct scheduler_msg msg = {0};
5236 QDF_STATUS status;
5237 struct data_stall_event_info *data_stall_info;
5238 ol_txrx_pdev_handle pdev;
5239
5240 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5241 if (!pdev) {
5242 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5243 "%s: pdev is NULL.", __func__);
5244 return;
5245 }
5246 data_stall_info = qdf_mem_malloc(sizeof(*data_stall_info));
Nirav Shah7c8c1712018-09-10 16:01:31 +05305247 if (!data_stall_info)
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305248 return;
Nirav Shah7c8c1712018-09-10 16:01:31 +05305249
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305250 data_stall_info->indicator = indicator;
5251 data_stall_info->data_stall_type = data_stall_type;
5252 data_stall_info->vdev_id_bitmap = vdev_id_bitmap;
5253 data_stall_info->pdev_id = pdev_id;
5254 data_stall_info->recovery_type = recovery_type;
5255
Poddar, Siddarthb9047592017-10-05 15:48:28 +05305256 if (data_stall_info->data_stall_type ==
5257 DATA_STALL_LOG_FW_RX_REFILL_FAILED)
5258 htt_log_rx_ring_info(pdev->htt_pdev);
5259
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305260 sys_build_message_header(SYS_MSG_ID_DATA_STALL_MSG, &msg);
5261 /* Save callback and data */
5262 msg.callback = pdev->data_stall_detect_callback;
5263 msg.bodyptr = data_stall_info;
5264 msg.bodyval = 0;
5265
gaurank kathpalia9fb3f4b2018-08-28 20:19:48 +05305266 status = scheduler_post_message(QDF_MODULE_ID_TXRX,
5267 QDF_MODULE_ID_HDD,
5268 QDF_MODULE_ID_SYS, &msg);
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305269
Madhvapathi Sriram3e6627a2018-12-19 12:54:49 +05305270 if (status != QDF_STATUS_SUCCESS)
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305271 qdf_mem_free(data_stall_info);
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305272}
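/*
 * Illustrative flow for ol_txrx_post_data_stall_event() above (editor's
 * summary of the code, not an authoritative design note): the detector fills
 * a data_stall_event_info from the arguments, the event is posted through
 * scheduler_post_message() to the SYS queue, and the previously registered
 * pdev->data_stall_detect_callback consumes it in scheduler thread context.
 * For DATA_STALL_LOG_FW_RX_REFILL_FAILED stalls, the RX ring state is dumped
 * via htt_log_rx_ring_info() before posting.
 */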
5273
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305274void
5275ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
5276{
Nirav Shah7c8c1712018-09-10 16:01:31 +05305277 qdf_print(" Pkt: VA 0x%pK PA 0x%llx len %d\n",
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305278 qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
5279 print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
5280 qdf_nbuf_data(nbuf), len, true);
5281}
5282
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005283struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
Dhanashri Atre12a08392016-02-17 13:10:34 -08005284{
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05305285 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
Dhanashri Atre12a08392016-02-17 13:10:34 -08005286 ol_txrx_vdev_handle vdev = NULL;
5287
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05305288 if (qdf_unlikely(!soc)) {
5289 ol_txrx_err("soc is NULL");
5290 return NULL;
5291 }
5292
5293 vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc, vdev_id);
5294
5295 return ol_txrx_vdev_t_to_cdp_vdev(vdev);
5296}
5297
5298struct ol_txrx_vdev_t *ol_txrx_get_vdev_from_soc_vdev_id(
5299 struct ol_txrx_soc_t *soc, uint8_t vdev_id)
5300{
5301 ol_txrx_pdev_handle pdev;
5302 ol_txrx_vdev_handle vdev = NULL;
5303
5304 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
Dhanashri Atre12a08392016-02-17 13:10:34 -08005305 if (qdf_unlikely(!pdev))
5306 return NULL;
5307
5308 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5309 if (vdev->vdev_id == vdev_id)
5310 break;
5311 }
5312
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05305313 return vdev;
Dhanashri Atre12a08392016-02-17 13:10:34 -08005314}
Nirav Shah2e583a02016-04-30 14:06:12 +05305315
5316/**
chenguo2201c0a2018-11-15 18:07:41 +08005317 * ol_txrx_get_mon_vdev_from_pdev() - get monitor mode vdev from pdev
5318 * @ppdev: the physical device the virtual device belongs to
5319 *
5320 * Return: vdev handle
5321 * NULL if not found.
5322 */
5323struct cdp_vdev *ol_txrx_get_mon_vdev_from_pdev(struct cdp_pdev *ppdev)
5324{
5325 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
5326
5327 if (qdf_unlikely(!pdev))
5328 return NULL;
5329
5330 return (struct cdp_vdev *)pdev->monitor_vdev;
5331}
5332
5333/**
Nirav Shah2e583a02016-04-30 14:06:12 +05305334 * ol_txrx_set_wisa_mode() - set wisa mode
5335 * @vdev: vdev handle
5336 * @enable: enable flag
5337 *
5338 * Return: QDF STATUS
5339 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005340static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_vdev *pvdev, bool enable)
Nirav Shah2e583a02016-04-30 14:06:12 +05305341{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005342 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Leo Chang98726762016-10-28 11:07:18 -07005343
Nirav Shah2e583a02016-04-30 14:06:12 +05305344 if (!vdev)
5345 return QDF_STATUS_E_INVAL;
5346
5347 vdev->is_wisa_mode_enable = enable;
5348 return QDF_STATUS_SUCCESS;
5349}
Leo Chang98726762016-10-28 11:07:18 -07005350
5351/**
5352 * ol_txrx_get_vdev_id() - get interface id from interface context
5353 * @pvdev: vdev handle
5354 *
5355 * Return: virtual interface id
5356 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005357static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
Leo Chang98726762016-10-28 11:07:18 -07005358{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005359 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07005360
Leo Chang98726762016-10-28 11:07:18 -07005361 return vdev->vdev_id;
5362}
5363
5364/**
Leo Chang98726762016-10-28 11:07:18 -07005365 * ol_txrx_soc_attach_target() - attach soc target
5366 * @soc: soc handle
5367 *
 5368 * MCL legacy OL does nothing here
 5369 *
 5370 * Return: QDF_STATUS_SUCCESS
5371 */
Venkata Sharath Chandra Manchala598f5032018-09-05 18:55:43 -07005372static QDF_STATUS ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
Leo Chang98726762016-10-28 11:07:18 -07005373{
5374 /* MCL legacy OL do nothing here */
Venkata Sharath Chandra Manchala598f5032018-09-05 18:55:43 -07005375 return QDF_STATUS_SUCCESS;
Leo Chang98726762016-10-28 11:07:18 -07005376}
5377
5378/**
5379 * ol_txrx_soc_detach() - detach soc target
5380 * @soc: soc handle
5381 *
 5382 * Free the soc context allocated by ol_txrx_soc_attach()
 5383 *
 5384 * Return: none
5385 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005386static void ol_txrx_soc_detach(void *soc)
Leo Chang98726762016-10-28 11:07:18 -07005387{
Venkata Sharath Chandra Manchala0c2eece2017-03-09 17:30:52 -08005388 qdf_mem_free(soc);
Leo Chang98726762016-10-28 11:07:18 -07005389}
5390
5391/**
5392 * ol_txrx_pkt_log_con_service() - connect packet log service
5393 * @ppdev: physical device handle
5394 * @scn: device context
5395 *
 5396 * Return: none
5397 */
Nirav Shahbb8e47c2018-05-17 16:56:41 +05305398#ifdef REMOVE_PKT_LOG
5399static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
5400{
5401}
5402#else
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005403static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
Leo Chang98726762016-10-28 11:07:18 -07005404{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005405 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07005406
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005407 htt_pkt_log_init((struct cdp_pdev *)pdev, scn);
Leo Chang98726762016-10-28 11:07:18 -07005408 pktlog_htc_attach();
5409}
Nirav Shahbb8e47c2018-05-17 16:56:41 +05305410#endif
Leo Chang98726762016-10-28 11:07:18 -07005411
5412/* OL wrapper functions for CDP abstraction */
5413/**
5414 * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
5415 * @peer: peer handle
5416 * @drop: rx packets drop or deliver
5417 *
5418 * Return: none
5419 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005420static void ol_txrx_wrapper_flush_rx_frames(void *peer, bool drop)
Leo Chang98726762016-10-28 11:07:18 -07005421{
5422 ol_txrx_flush_rx_frames((ol_txrx_peer_handle)peer, drop);
5423}
5424
5425/**
5426 * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
5427 * @ppdev: pdev handle
5428 * @vdev_id: interface id
5429 *
5430 * Return: virtual interface instance
5431 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005432static
5433struct cdp_vdev *ol_txrx_wrapper_get_vdev_from_vdev_id(struct cdp_pdev *ppdev,
5434 uint8_t vdev_id)
Leo Chang98726762016-10-28 11:07:18 -07005435{
5436 return ol_txrx_get_vdev_from_vdev_id(vdev_id);
5437}
5438
5439/**
Rakshith Suresh Patkar02f3d312019-06-07 17:11:31 +05305440 * ol_txrx_pdev_set_ctrl_pdev() - set ctrl pdev handle in txrx pdev
5441 * @txrx_pdev: txrx pdev handle
5442 * @ctrl_pdev: UMAC ctrl pdev handle
5443 *
5444 * Return: void
5445 */
5446static void
5447ol_txrx_pdev_set_ctrl_pdev(struct cdp_pdev *txrx_pdev,
5448 struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
5449{
5450 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)txrx_pdev;
5451
5452 pdev->control_pdev = ctrl_pdev;
5453}
5454
5455/**
Leo Chang98726762016-10-28 11:07:18 -07005456 * ol_txrx_wrapper_register_peer() - register peer
5457 * @pdev: pdev handle
5458 * @sta_desc: peer description
5459 *
5460 * Return: QDF STATUS
5461 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005462static QDF_STATUS ol_txrx_wrapper_register_peer(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005463 struct ol_txrx_desc_type *sta_desc)
5464{
5465 return ol_txrx_register_peer(sta_desc);
5466}
5467
5468/**
5469 * ol_txrx_wrapper_peer_find_by_local_id() - Find a txrx peer handle
 5470 * @pdev: the data physical device object
 5471 * @local_peer_id: the ID txrx assigned locally to the peer in question
5472 *
5473 * The control SW typically uses the txrx peer handle to refer to the peer.
 5474 * In unusual circumstances, if it is infeasible for the control SW to
 5475 * maintain the txrx peer handle but it can maintain a small integer
 5476 * local peer ID, this function allows the peer handle to be retrieved,
 5477 * based on the local peer ID.
5478 *
 5479 * Return: handle to the txrx peer object
5480 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005481static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005482ol_txrx_wrapper_peer_find_by_local_id(struct cdp_pdev *pdev,
5483 uint8_t local_peer_id)
Leo Chang98726762016-10-28 11:07:18 -07005484{
5485 return (void *)ol_txrx_peer_find_by_local_id(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005486 pdev, local_peer_id);
Leo Chang98726762016-10-28 11:07:18 -07005487}
5488
5489/**
5490 * ol_txrx_wrapper_cfg_is_high_latency() - device is high or low latency device
5491 * @pdev: pdev handle
5492 *
5493 * Return: 1 high latency bus
5494 * 0 low latency bus
5495 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005496static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
Leo Chang98726762016-10-28 11:07:18 -07005497{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005498 return ol_cfg_is_high_latency(cfg_pdev);
Leo Chang98726762016-10-28 11:07:18 -07005499}
5500
5501/**
5502 * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
 5503 * @pdev: data physical device handle
 5504 * @peer_mac: MAC address of the peer whose state has changed
 5505 * @state: the new state of the peer
5505 *
5506 * Specify the peer's authentication state (none, connected, authenticated)
5507 * to allow the data SW to determine whether to filter out invalid data frames.
5508 * (In the "connected" state, where security is enabled, but authentication
5509 * has not completed, tx and rx data frames other than EAPOL or WAPI should
5510 * be discarded.)
5511 * This function is only relevant for systems in which the tx and rx filtering
5512 * are done in the host rather than in the target.
5513 *
5514 * Return: QDF Status
5515 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005516static QDF_STATUS ol_txrx_wrapper_peer_state_update(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005517 uint8_t *peer_mac, enum ol_txrx_peer_state state)
5518{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005519 return ol_txrx_peer_state_update(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005520 peer_mac, state);
5521}
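/*
 * Illustrative call sequence for the wrapper above (editor's assumption,
 * mirroring how an upper layer typically drives the peer state machine):
 *
 *	ol_txrx_wrapper_peer_state_update(pdev, mac, OL_TXRX_PEER_STATE_CONN);
 *	... 802.1X/WAPI handshake completes ...
 *	ol_txrx_wrapper_peer_state_update(pdev, mac, OL_TXRX_PEER_STATE_AUTH);
 *
 * Until the AUTH update, host-side filtering is expected to pass only
 * EAPOL/WAPI data frames, as described in the comment block above.
 */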
5522
5523/**
5524 * ol_txrx_wrapper_find_peer_by_addr() - find peer instance by address
5525 * @pdev: pdev handle
Jeff Johnson37df7c32018-05-10 12:30:35 -07005526 * @peer_addr: peer address to find
Leo Chang98726762016-10-28 11:07:18 -07005527 * @peer_id: peer id
5528 *
5529 * Return: peer instance pointer
5530 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005531static void *ol_txrx_wrapper_find_peer_by_addr(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005532 uint8_t *peer_addr, uint8_t *peer_id)
5533{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005534 return ol_txrx_find_peer_by_addr(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005535 peer_addr, peer_id);
5536}
5537
5538/**
Mohit Khannab7bec722017-11-10 11:43:44 -08005539 * ol_txrx_wrapper_peer_get_ref_by_addr() - get peer reference by address
5540 * @pdev: pdev handle
5541 * @peer_addr: peer address we want to find
5542 * @peer_id: peer id
5543 * @debug_id: peer debug id for tracking
5544 *
5545 * Return: peer instance pointer
5546 */
5547static void *
5548ol_txrx_wrapper_peer_get_ref_by_addr(struct cdp_pdev *pdev,
5549 u8 *peer_addr, uint8_t *peer_id,
5550 enum peer_debug_id_type debug_id)
5551{
5552 return ol_txrx_peer_get_ref_by_addr((ol_txrx_pdev_handle)pdev,
5553 peer_addr, peer_id, debug_id);
5554}
5555
5556/**
5557 * ol_txrx_wrapper_peer_release_ref() - release peer reference
5558 * @peer: peer handle
5559 * @debug_id: peer debug id for tracking
5560 *
5561 * Release peer ref acquired by peer get ref api
5562 *
5563 * Return: void
5564 */
5565static void ol_txrx_wrapper_peer_release_ref(void *peer,
5566 enum peer_debug_id_type debug_id)
5567{
5568 ol_txrx_peer_release_ref(peer, debug_id);
5569}
5570
5571/**
Leo Chang98726762016-10-28 11:07:18 -07005572 * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
5573 * @cfg_ctx: cfg context
5574 * @cfg_param: cfg parameters
5575 *
5576 * Return: none
5577 */
Jeff Johnsonffa9afc2016-12-19 15:34:41 -08005578static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005579ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
5580 void *cfg_param)
Leo Chang98726762016-10-28 11:07:18 -07005581{
5582 return ol_tx_set_flow_control_parameters(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005583 cfg_pdev,
Leo Chang98726762016-10-28 11:07:18 -07005584 (struct txrx_pdev_cfg_param_t *)cfg_param);
5585}
5586
jitiphil377bcc12018-10-05 19:46:08 +05305587/**
5588 * ol_txrx_get_cfg() - get ini/cgf values in legacy dp
5589 * @soc: soc context
5590 * @cfg_param: cfg parameters
5591 *
5592 * Return: none
5593 */
5594static uint32_t ol_txrx_get_cfg(void *soc, enum cdp_dp_cfg cfg)
5595{
5596 struct txrx_pdev_cfg_t *cfg_ctx;
5597 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5598 uint32_t value = 0;
5599
Vulupala Shashank Reddy4bf9be62019-06-03 12:33:00 +05305600 if (!pdev) {
5601 qdf_print("pdev is NULL");
5602 return 0;
5603 }
5604
jitiphil377bcc12018-10-05 19:46:08 +05305605 cfg_ctx = (struct txrx_pdev_cfg_t *)(pdev->ctrl_pdev);
5606 switch (cfg) {
5607 case cfg_dp_enable_data_stall:
5608 value = cfg_ctx->enable_data_stall_detection;
5609 break;
5610 case cfg_dp_enable_ip_tcp_udp_checksum_offload:
5611 value = cfg_ctx->ip_tcp_udp_checksum_offload;
5612 break;
5613 case cfg_dp_tso_enable:
5614 value = cfg_ctx->tso_enable;
5615 break;
5616 case cfg_dp_lro_enable:
5617 value = cfg_ctx->lro_enable;
5618 break;
5619 case cfg_dp_gro_enable:
5620 value = cfg_ctx->gro_enable;
5621 break;
5622#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5623 case cfg_dp_tx_flow_start_queue_offset:
5624 value = cfg_ctx->tx_flow_start_queue_offset;
5625 break;
5626 case cfg_dp_tx_flow_stop_queue_threshold:
5627 value = cfg_ctx->tx_flow_stop_queue_th;
5628 break;
5629#endif
5630 case cfg_dp_ipa_uc_tx_buf_size:
5631 value = cfg_ctx->uc_tx_buffer_size;
5632 break;
5633 case cfg_dp_ipa_uc_tx_partition_base:
5634 value = cfg_ctx->uc_tx_partition_base;
5635 break;
5636 case cfg_dp_ipa_uc_rx_ind_ring_count:
5637 value = cfg_ctx->uc_rx_indication_ring_count;
5638 break;
5639 case cfg_dp_enable_flow_steering:
5640 value = cfg_ctx->enable_flow_steering;
5641 break;
5642 case cfg_dp_reorder_offload_supported:
5643 value = cfg_ctx->is_full_reorder_offload;
5644 break;
5645 case cfg_dp_ce_classify_enable:
5646 value = cfg_ctx->ce_classify_enabled;
5647 break;
5648 case cfg_dp_disable_intra_bss_fwd:
5649 value = cfg_ctx->disable_intra_bss_fwd;
5650 break;
5651 default:
5652 value = 0;
5653 break;
5654 }
5655
5656 return value;
5657}
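/*
 * Illustrative use of ol_txrx_get_cfg() above (editor's sketch; the consumer
 * function name is hypothetical):
 *
 *	if (ol_txrx_get_cfg(soc, cfg_dp_gro_enable))
 *		hdd_enable_gro();	// hypothetical consumer
 *
 * Note that a return of 0 can mean either "feature disabled" or "pdev not
 * yet attached", since the NULL-pdev path also returns 0.
 */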
5658
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08005659#ifdef WDI_EVENT_ENABLE
5660void *ol_get_pldev(struct cdp_pdev *txrx_pdev)
5661{
5662 struct ol_txrx_pdev_t *pdev =
5663 (struct ol_txrx_pdev_t *)txrx_pdev;
Jeff Johnson6795c3a2019-03-18 13:43:04 -07005664 if (pdev)
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08005665 return pdev->pl_dev;
5666
5667 return NULL;
5668}
5669#endif
5670
Lin Bai1a73a412018-12-13 16:40:14 +08005671/**
5672 * ol_register_packetdump_callback() - registers
5673 * tx data packet, tx mgmt. packet and rx data packet
5674 * dump callback handler.
5675 *
5676 * @ol_tx_packetdump_cb: tx packetdump cb
5677 * @ol_rx_packetdump_cb: rx packetdump cb
5678 *
5679 * This function is used to register tx data pkt, tx mgmt.
5680 * pkt and rx data pkt dump callback
5681 *
5682 * Return: None
5683 *
5684 */
5685static inline
5686void ol_register_packetdump_callback(ol_txrx_pktdump_cb ol_tx_packetdump_cb,
5687 ol_txrx_pktdump_cb ol_rx_packetdump_cb)
5688{
5689 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5690
5691 if (!pdev) {
5692 ol_txrx_err("pdev is NULL");
5693 return;
5694 }
5695
5696 pdev->ol_tx_packetdump_cb = ol_tx_packetdump_cb;
5697 pdev->ol_rx_packetdump_cb = ol_rx_packetdump_cb;
5698}
5699
5700/**
 5701 * ol_deregister_packetdump_callback() - deregisters
5702 * tx data packet, tx mgmt. packet and rx data packet
5703 * dump callback handler
5704 *
 5705 * This function is used to deregister tx data pkt.,
5706 * tx mgmt. pkt and rx data pkt. dump callback
5707 *
5708 * Return: None
5709 *
5710 */
5711static inline
5712void ol_deregister_packetdump_callback(void)
5713{
5714 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5715
5716 if (!pdev) {
5717 ol_txrx_err("pdev is NULL");
5718 return;
5719 }
5720
5721 pdev->ol_tx_packetdump_cb = NULL;
5722 pdev->ol_rx_packetdump_cb = NULL;
5723}
5724
Leo Chang98726762016-10-28 11:07:18 -07005725static struct cdp_cmn_ops ol_ops_cmn = {
5726 .txrx_soc_attach_target = ol_txrx_soc_attach_target,
5727 .txrx_vdev_attach = ol_txrx_vdev_attach,
5728 .txrx_vdev_detach = ol_txrx_vdev_detach,
5729 .txrx_pdev_attach = ol_txrx_pdev_attach,
5730 .txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
5731 .txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05305732 .txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
Leo Chang98726762016-10-28 11:07:18 -07005733 .txrx_pdev_detach = ol_txrx_pdev_detach,
Dhanashri Atre272fd232016-11-10 16:20:46 -08005734 .txrx_peer_create = ol_txrx_peer_attach,
5735 .txrx_peer_setup = NULL,
5736 .txrx_peer_teardown = NULL,
5737 .txrx_peer_delete = ol_txrx_peer_detach,
Alok Kumare1977442018-11-28 17:16:03 +05305738 .txrx_peer_delete_sync = ol_txrx_peer_detach_sync,
Leo Chang98726762016-10-28 11:07:18 -07005739 .txrx_vdev_register = ol_txrx_vdev_register,
5740 .txrx_soc_detach = ol_txrx_soc_detach,
5741 .txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
5742 .txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
5743 .txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
chenguo2201c0a2018-11-15 18:07:41 +08005744 .txrx_get_mon_vdev_from_pdev = ol_txrx_get_mon_vdev_from_pdev,
Krishna Kumaar Natarajan5fb9ac12016-12-06 14:28:35 -08005745 .txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
Leo Chang98726762016-10-28 11:07:18 -07005746 .txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
5747 .txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
Alok Kumar688eadb2019-02-14 14:44:01 +05305748 .txrx_peer_unmap_sync_cb_set = ol_txrx_peer_unmap_sync_cb_set,
Leo Chang98726762016-10-28 11:07:18 -07005749 .txrx_get_tx_pending = ol_txrx_get_tx_pending,
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07005750 .flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005751 .txrx_fw_stats_get = ol_txrx_fw_stats_get,
5752 .display_stats = ol_txrx_display_stats,
jitiphil377bcc12018-10-05 19:46:08 +05305753 .txrx_get_cfg = ol_txrx_get_cfg,
Rakshith Suresh Patkar02f3d312019-06-07 17:11:31 +05305754 .txrx_pdev_set_ctrl_pdev = ol_txrx_pdev_set_ctrl_pdev,
Leo Chang98726762016-10-28 11:07:18 -07005755 /* TODO: Add other functions */
5756};
5757
5758static struct cdp_misc_ops ol_ops_misc = {
5759 .set_ibss_vdev_heart_beat_timer =
5760 ol_txrx_set_ibss_vdev_heart_beat_timer,
5761#ifdef CONFIG_HL_SUPPORT
5762 .set_wmm_param = ol_txrx_set_wmm_param,
5763#endif /* CONFIG_HL_SUPPORT */
5764 .bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
5765 .bad_peer_txctl_update_threshold =
5766 ol_txrx_bad_peer_txctl_update_threshold,
5767 .hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
5768 .tx_non_std = ol_tx_non_std,
5769 .get_vdev_id = ol_txrx_get_vdev_id,
Alok Kumar75355aa2018-03-19 17:32:58 +05305770 .get_tx_ack_stats = ol_txrx_get_tx_ack_stats,
Leo Chang98726762016-10-28 11:07:18 -07005771 .set_wisa_mode = ol_txrx_set_wisa_mode,
Poddar, Siddarth34872782017-08-10 14:08:51 +05305772 .txrx_data_stall_cb_register = ol_register_data_stall_detect_cb,
5773 .txrx_data_stall_cb_deregister = ol_deregister_data_stall_detect_cb,
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305774 .txrx_post_data_stall_event = ol_txrx_post_data_stall_event,
Leo Chang98726762016-10-28 11:07:18 -07005775#ifdef FEATURE_RUNTIME_PM
5776 .runtime_suspend = ol_txrx_runtime_suspend,
5777 .runtime_resume = ol_txrx_runtime_resume,
5778#endif /* FEATURE_RUNTIME_PM */
5779 .get_opmode = ol_txrx_get_opmode,
5780 .mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
5781 .update_mac_id = ol_txrx_update_mac_id,
5782 .flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
5783 .get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
5784 .pkt_log_init = htt_pkt_log_init,
Lin Bai1a73a412018-12-13 16:40:14 +08005785 .pkt_log_con_service = ol_txrx_pkt_log_con_service,
5786 .register_pktdump_cb = ol_register_packetdump_callback,
Tiger Yue40e7832019-04-25 10:46:53 +08005787 .unregister_pktdump_cb = ol_deregister_packetdump_callback,
5788#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
5789 .pdev_reset_driver_del_ack = ol_tx_pdev_reset_driver_del_ack,
5790 .vdev_set_driver_del_ack_enable = ol_tx_vdev_set_driver_del_ack_enable
5791#endif
Leo Chang98726762016-10-28 11:07:18 -07005792};
5793
5794static struct cdp_flowctl_ops ol_ops_flowctl = {
Leo Chang98726762016-10-28 11:07:18 -07005795 .register_pause_cb = ol_txrx_register_pause_cb,
hangtian72704802019-04-17 18:16:25 +08005796#ifdef QCA_LL_TX_FLOW_CONTROL_V2
Leo Chang98726762016-10-28 11:07:18 -07005797 .set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07005798 .dump_flow_pool_info = ol_tx_dump_flow_pool_info,
Sravan Kumar Kairam8433f902019-01-10 15:53:54 +05305799 .tx_desc_thresh_reached = ol_tx_desc_thresh_reached,
Leo Chang98726762016-10-28 11:07:18 -07005800#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
5801};
5802
Ajit Pal Singh5d269612018-04-19 16:29:12 +05305803#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
Leo Chang98726762016-10-28 11:07:18 -07005804static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
Leo Chang98726762016-10-28 11:07:18 -07005805 .register_tx_flow_control = ol_txrx_register_tx_flow_control,
5806 .deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
5807 .flow_control_cb = ol_txrx_flow_control_cb,
5808 .get_tx_resource = ol_txrx_get_tx_resource,
5809 .ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
5810 .vdev_flush = ol_txrx_vdev_flush,
5811 .vdev_pause = ol_txrx_vdev_pause,
5812 .vdev_unpause = ol_txrx_vdev_unpause
Ajit Pal Singh5d269612018-04-19 16:29:12 +05305813}; /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
5814#elif defined(QCA_HL_NETDEV_FLOW_CONTROL)
5815static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
5816 .register_tx_flow_control = ol_txrx_register_hl_flow_control,
5817 .vdev_flush = ol_txrx_vdev_flush,
5818 .vdev_pause = ol_txrx_vdev_pause,
Ajit Pal Singh851a7772018-05-14 16:55:09 +05305819 .vdev_unpause = ol_txrx_vdev_unpause,
Ajit Pal Singhd6c08f22018-04-25 16:55:26 +05305820 .set_vdev_os_queue_status = ol_txrx_set_vdev_os_queue_status,
5821 .set_vdev_tx_desc_limit = ol_txrx_set_vdev_tx_desc_limit
Leo Chang98726762016-10-28 11:07:18 -07005822};
Ajit Pal Singh5d269612018-04-19 16:29:12 +05305823#else /* QCA_HL_NETDEV_FLOW_CONTROL */
5824static struct cdp_lflowctl_ops ol_ops_l_flowctl = { };
5825#endif
Leo Chang98726762016-10-28 11:07:18 -07005826
Leo Chang98726762016-10-28 11:07:18 -07005827#ifdef IPA_OFFLOAD
Yun Parkb4f591d2017-03-29 15:51:01 -07005828static struct cdp_ipa_ops ol_ops_ipa = {
Leo Chang98726762016-10-28 11:07:18 -07005829 .ipa_get_resource = ol_txrx_ipa_uc_get_resource,
5830 .ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
5831 .ipa_set_active = ol_txrx_ipa_uc_set_active,
5832 .ipa_op_response = ol_txrx_ipa_uc_op_response,
5833 .ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
5834 .ipa_get_stat = ol_txrx_ipa_uc_get_stat,
5835 .ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
Yun Park637d6482016-10-05 10:51:33 -07005836 .ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base,
Yun Parkb4f591d2017-03-29 15:51:01 -07005837 .ipa_enable_autonomy = ol_txrx_ipa_enable_autonomy,
5838 .ipa_disable_autonomy = ol_txrx_ipa_disable_autonomy,
5839 .ipa_setup = ol_txrx_ipa_setup,
5840 .ipa_cleanup = ol_txrx_ipa_cleanup,
5841 .ipa_setup_iface = ol_txrx_ipa_setup_iface,
5842 .ipa_cleanup_iface = ol_txrx_ipa_cleanup_iface,
5843 .ipa_enable_pipes = ol_txrx_ipa_enable_pipes,
5844 .ipa_disable_pipes = ol_txrx_ipa_disable_pipes,
5845 .ipa_set_perf_level = ol_txrx_ipa_set_perf_level,
5846#ifdef FEATURE_METERING
Yun Park637d6482016-10-05 10:51:33 -07005847 .ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
5848 .ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota
Yun Parkb4f591d2017-03-29 15:51:01 -07005849#endif
Leo Chang98726762016-10-28 11:07:18 -07005850};
Yun Parkb4f591d2017-03-29 15:51:01 -07005851#endif
Leo Chang98726762016-10-28 11:07:18 -07005852
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005853#ifdef RECEIVE_OFFLOAD
5854static struct cdp_rx_offld_ops ol_rx_offld_ops = {
5855 .register_rx_offld_flush_cb = ol_register_offld_flush_cb,
5856 .deregister_rx_offld_flush_cb = ol_deregister_offld_flush_cb
5857};
5858#endif
5859
Leo Chang98726762016-10-28 11:07:18 -07005860static struct cdp_bus_ops ol_ops_bus = {
5861 .bus_suspend = ol_txrx_bus_suspend,
5862 .bus_resume = ol_txrx_bus_resume
5863};
5864
Nirav Shah575282c2018-07-08 22:48:00 +05305865#ifdef WLAN_FEATURE_DSRC
Leo Chang98726762016-10-28 11:07:18 -07005866static struct cdp_ocb_ops ol_ops_ocb = {
5867 .set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
5868 .get_ocb_chan_info = ol_txrx_get_ocb_chan_info
5869};
Nirav Shah575282c2018-07-08 22:48:00 +05305870#endif
Leo Chang98726762016-10-28 11:07:18 -07005871
5872static struct cdp_throttle_ops ol_ops_throttle = {
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005873#ifdef QCA_SUPPORT_TX_THROTTLE
Leo Chang98726762016-10-28 11:07:18 -07005874 .throttle_init_period = ol_tx_throttle_init_period,
5875 .throttle_set_level = ol_tx_throttle_set_level
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005876#endif /* QCA_SUPPORT_TX_THROTTLE */
Leo Chang98726762016-10-28 11:07:18 -07005877};
5878
5879static struct cdp_mob_stats_ops ol_ops_mob_stats = {
Leo Chang98726762016-10-28 11:07:18 -07005880 .clear_stats = ol_txrx_clear_stats,
5881 .stats = ol_txrx_stats
5882};
5883
5884static struct cdp_cfg_ops ol_ops_cfg = {
5885 .set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
5886 .set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
5887 .cfg_attach = ol_pdev_cfg_attach,
5888 .vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
5889 .is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
5890 .tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
5891 .is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
5892 .set_flow_control_parameters =
5893 ol_txrx_wrapper_set_flow_control_parameters,
5894 .set_flow_steering = ol_set_cfg_flow_steering,
Yu Wang66a250b2017-07-19 11:46:40 +08005895 .set_ptp_rx_opt_enabled = ol_set_cfg_ptp_rx_opt_enabled,
jitiphilebf3a922018-11-05 14:25:00 +05305896 .set_new_htt_msg_format =
5897 ol_txrx_set_new_htt_msg_format,
Alok Kumare1977442018-11-28 17:16:03 +05305898 .set_peer_unmap_conf_support = ol_txrx_set_peer_unmap_conf_support,
5899 .get_peer_unmap_conf_support = ol_txrx_get_peer_unmap_conf_support,
Jiani Liu6d3b6a12019-05-08 15:15:06 +08005900 .set_tx_compl_tsf64 = ol_txrx_set_tx_compl_tsf64,
5901 .get_tx_compl_tsf64 = ol_txrx_get_tx_compl_tsf64,
Leo Chang98726762016-10-28 11:07:18 -07005902};
5903
5904static struct cdp_peer_ops ol_ops_peer = {
5905 .register_peer = ol_txrx_wrapper_register_peer,
5906 .clear_peer = ol_txrx_clear_peer,
Mohit Khannab7bec722017-11-10 11:43:44 -08005907 .peer_get_ref_by_addr = ol_txrx_wrapper_peer_get_ref_by_addr,
5908 .peer_release_ref = ol_txrx_wrapper_peer_release_ref,
Leo Chang98726762016-10-28 11:07:18 -07005909 .find_peer_by_addr = ol_txrx_wrapper_find_peer_by_addr,
5910 .find_peer_by_addr_and_vdev = ol_txrx_find_peer_by_addr_and_vdev,
5911 .local_peer_id = ol_txrx_local_peer_id,
5912 .peer_find_by_local_id = ol_txrx_wrapper_peer_find_by_local_id,
5913 .peer_state_update = ol_txrx_wrapper_peer_state_update,
5914 .get_vdevid = ol_txrx_get_vdevid,
Vevek Venkatesanb8e96622019-10-14 18:40:32 +05305915 .get_vdev_by_peer_addr = ol_txrx_wrapper_get_vdev_by_peer_addr,
Leo Chang98726762016-10-28 11:07:18 -07005916 .register_ocb_peer = ol_txrx_register_ocb_peer,
5917 .peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
5918 .get_peer_state = ol_txrx_get_peer_state,
5919 .get_vdev_for_peer = ol_txrx_get_vdev_for_peer,
5920 .update_ibss_add_peer_num_of_vdev =
5921 ol_txrx_update_ibss_add_peer_num_of_vdev,
5922 .remove_peers_for_vdev = ol_txrx_remove_peers_for_vdev,
5923 .remove_peers_for_vdev_no_lock = ol_txrx_remove_peers_for_vdev_no_lock,
Yu Wang053d3e72017-02-08 18:48:24 +08005924#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
Leo Chang98726762016-10-28 11:07:18 -07005925 .copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
5926 .add_last_real_peer = ol_txrx_add_last_real_peer,
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005927 .is_vdev_restore_last_peer = is_vdev_restore_last_peer,
5928 .update_last_real_peer = ol_txrx_update_last_real_peer,
5929#endif /* CONFIG_HL_SUPPORT */
Leo Chang98726762016-10-28 11:07:18 -07005930 .peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
5931};
5932
5933static struct cdp_tx_delay_ops ol_ops_delay = {
5934#ifdef QCA_COMPUTE_TX_DELAY
5935 .tx_delay = ol_tx_delay,
5936 .tx_delay_hist = ol_tx_delay_hist,
5937 .tx_packet_count = ol_tx_packet_count,
5938 .tx_set_compute_interval = ol_tx_set_compute_interval
5939#endif /* QCA_COMPUTE_TX_DELAY */
5940};
5941
5942static struct cdp_pmf_ops ol_ops_pmf = {
5943 .get_pn_info = ol_txrx_get_pn_info
5944};
5945
Leo Chang98726762016-10-28 11:07:18 -07005946static struct cdp_ctrl_ops ol_ops_ctrl = {
Hanumanth Reddy Pothula855f7ef2018-02-13 18:32:05 +05305947 .txrx_get_pldev = ol_get_pldev,
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08005948 .txrx_wdi_event_sub = wdi_event_sub,
5949 .txrx_wdi_event_unsub = wdi_event_unsub,
Leo Chang98726762016-10-28 11:07:18 -07005950};
5951
Hanumanth Reddy Pothula855f7ef2018-02-13 18:32:05 +05305952/* WINplatform specific structures */
Leo Chang98726762016-10-28 11:07:18 -07005953static struct cdp_me_ops ol_ops_me = {
5954 /* EMPTY FOR MCL */
5955};
5956
5957static struct cdp_mon_ops ol_ops_mon = {
Jinwei Chen8cb25b32019-06-14 11:04:47 +08005958 .txrx_monitor_record_channel = ol_htt_mon_note_chan,
Leo Chang98726762016-10-28 11:07:18 -07005959};
5960
5961static struct cdp_host_stats_ops ol_ops_host_stats = {
5962 /* EMPTY FOR MCL */
5963};
5964
5965static struct cdp_wds_ops ol_ops_wds = {
5966 /* EMPTY FOR MCL */
5967};
5968
5969static struct cdp_raw_ops ol_ops_raw = {
5970 /* EMPTY FOR MCL */
5971};
5972
5973static struct cdp_ops ol_txrx_ops = {
5974 .cmn_drv_ops = &ol_ops_cmn,
5975 .ctrl_ops = &ol_ops_ctrl,
5976 .me_ops = &ol_ops_me,
5977 .mon_ops = &ol_ops_mon,
5978 .host_stats_ops = &ol_ops_host_stats,
5979 .wds_ops = &ol_ops_wds,
5980 .raw_ops = &ol_ops_raw,
5981 .misc_ops = &ol_ops_misc,
5982 .cfg_ops = &ol_ops_cfg,
5983 .flowctl_ops = &ol_ops_flowctl,
5984 .l_flowctl_ops = &ol_ops_l_flowctl,
Yun Parkb4f591d2017-03-29 15:51:01 -07005985#ifdef IPA_OFFLOAD
Leo Chang98726762016-10-28 11:07:18 -07005986 .ipa_ops = &ol_ops_ipa,
Yun Parkb4f591d2017-03-29 15:51:01 -07005987#endif
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005988#ifdef RECEIVE_OFFLOAD
5989 .rx_offld_ops = &ol_rx_offld_ops,
5990#endif
Leo Chang98726762016-10-28 11:07:18 -07005991 .bus_ops = &ol_ops_bus,
Nirav Shah575282c2018-07-08 22:48:00 +05305992#ifdef WLAN_FEATURE_DSRC
Leo Chang98726762016-10-28 11:07:18 -07005993 .ocb_ops = &ol_ops_ocb,
Nirav Shah575282c2018-07-08 22:48:00 +05305994#endif
Leo Chang98726762016-10-28 11:07:18 -07005995 .peer_ops = &ol_ops_peer,
5996 .throttle_ops = &ol_ops_throttle,
5997 .mob_stats_ops = &ol_ops_mob_stats,
5998 .delay_ops = &ol_ops_delay,
5999 .pmf_ops = &ol_ops_pmf
6000};
6001
Rakesh Pillaica99b832019-06-24 15:05:13 +05306002ol_txrx_soc_handle ol_txrx_soc_attach(void *scn_handle,
6003 struct ol_if_ops *dp_ol_if_ops)
Leo Chang98726762016-10-28 11:07:18 -07006004{
Rakesh Pillaica99b832019-06-24 15:05:13 +05306005 struct ol_txrx_soc_t *soc;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07006006
Rakesh Pillaica99b832019-06-24 15:05:13 +05306007 soc = qdf_mem_malloc(sizeof(*soc));
Nirav Shah7c8c1712018-09-10 16:01:31 +05306008 if (!soc)
Leo Chang98726762016-10-28 11:07:18 -07006009 return NULL;
Leo Chang98726762016-10-28 11:07:18 -07006010
Rakesh Pillaica99b832019-06-24 15:05:13 +05306011 soc->psoc = scn_handle;
6012 soc->cdp_soc.ops = &ol_txrx_ops;
6013 soc->cdp_soc.ol_ops = dp_ol_if_ops;
6014
6015 return ol_txrx_soc_t_to_cdp_soc_t(soc);
Leo Chang98726762016-10-28 11:07:18 -07006016}
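/*
 * Illustrative usage of ol_txrx_soc_attach() above (editor's sketch of a
 * typical MCL init path; the error handling shown is an assumption):
 *
 *	ol_txrx_soc_handle soc = ol_txrx_soc_attach(psoc, &dp_ol_if_ops);
 *
 *	if (!soc)
 *		return QDF_STATUS_E_NOMEM;
 *
 * The returned cdp soc handle carries &ol_txrx_ops, so subsequent cdp_*
 * wrapper calls dispatch into the ol_txrx_* implementations in this file.
 */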
jitiphilebf3a922018-11-05 14:25:00 +05306017
6018bool ol_txrx_get_new_htt_msg_format(struct ol_txrx_pdev_t *pdev)
6019{
6020 if (!pdev) {
6021 qdf_print("%s: pdev is NULL\n", __func__);
6022 return false;
6023 }
6024 return pdev->new_htt_msg_format;
6025}
6026
6027void ol_txrx_set_new_htt_msg_format(uint8_t val)
6028{
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05306029 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
6030 ol_txrx_pdev_handle pdev;
jitiphilebf3a922018-11-05 14:25:00 +05306031
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05306032 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
jitiphilebf3a922018-11-05 14:25:00 +05306033 if (!pdev) {
6034 qdf_print("%s: pdev is NULL\n", __func__);
6035 return;
6036 }
6037 pdev->new_htt_msg_format = val;
6038}
6039
Alok Kumare1977442018-11-28 17:16:03 +05306040bool ol_txrx_get_peer_unmap_conf_support(void)
6041{
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05306042 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
6043 ol_txrx_pdev_handle pdev;
Alok Kumare1977442018-11-28 17:16:03 +05306044
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05306045 if (qdf_unlikely(!soc)) {
6046 ol_txrx_err("soc is NULL");
6047 return false;
6048 }
6049
6050 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
Alok Kumare1977442018-11-28 17:16:03 +05306051 if (!pdev) {
6052 qdf_print("%s: pdev is NULL\n", __func__);
6053 return false;
6054 }
6055 return pdev->enable_peer_unmap_conf_support;
6056}
6057
6058void ol_txrx_set_peer_unmap_conf_support(bool val)
6059{
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05306060 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
6061 ol_txrx_pdev_handle pdev;
Alok Kumare1977442018-11-28 17:16:03 +05306062
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05306063 if (qdf_unlikely(!soc)) {
6064 ol_txrx_err("soc is NULL");
6065 return;
6066 }
6067
6068 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
Alok Kumare1977442018-11-28 17:16:03 +05306069 if (!pdev) {
6070 qdf_print("%s: pdev is NULL\n", __func__);
6071 return;
6072 }
6073 pdev->enable_peer_unmap_conf_support = val;
6074}
Jiani Liu6d3b6a12019-05-08 15:15:06 +08006075
6076#ifdef WLAN_FEATURE_TSF_PLUS
6077bool ol_txrx_get_tx_compl_tsf64(void)
6078{
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05306079 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
6080 ol_txrx_pdev_handle pdev;
Jiani Liu6d3b6a12019-05-08 15:15:06 +08006081
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05306082 if (qdf_unlikely(!soc)) {
6083 ol_txrx_err("soc is NULL");
6084 return false;
6085 }
6086
6087 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
Jiani Liu6d3b6a12019-05-08 15:15:06 +08006088 if (!pdev) {
6089 qdf_print("%s: pdev is NULL\n", __func__);
6090 return false;
6091 }
6092 return pdev->enable_tx_compl_tsf64;
6093}
6094
6095void ol_txrx_set_tx_compl_tsf64(bool val)
6096{
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05306097 struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
6098 ol_txrx_pdev_handle pdev;
Jiani Liu6d3b6a12019-05-08 15:15:06 +08006099
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05306100 if (qdf_unlikely(!soc)) {
6101 ol_txrx_err("soc is NULL");
Jiani Liu6d3b6a12019-05-08 15:15:06 +08006102 return;
6103 }
Rakesh Pillai6c5af2f2019-09-25 15:33:07 +05306104
6105 pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
6106 if (!pdev) {
6107 ol_txrx_err("pdev is NULL");
6108 return;
6109 }
6110
Jiani Liu6d3b6a12019-05-08 15:15:06 +08006111 pdev->enable_tx_compl_tsf64 = val;
6112}
6113#else
6114bool ol_txrx_get_tx_compl_tsf64(void)
6115{
6116 return false;
6117}
6118
6119void ol_txrx_set_tx_compl_tsf64(bool val)
6120{
6121}
6122#endif