/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>              /* uint32_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc, free */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <qdf_debugfs.h>

/* header files for utilities */
#include <cds_queue.h>          /* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_if_athvar.h>

/* header files for HTT API */
#include <ol_htt_api.h>
#include <ol_htt_tx_api.h>

/* header files for our own APIs */
#include <ol_txrx_api.h>
#include <ol_txrx_dbg.h>
#include <cdp_txrx_ocb.h>
#include <ol_txrx_ctrl_api.h>
#include <cdp_txrx_stats.h>
#include <ol_txrx_osif_api.h>
/* header files for our internal definitions */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT, etc. */
#include <wdi_event.h>          /* WDI events */
#include <ol_tx.h>              /* ol_tx_ll */
#include <ol_rx.h>              /* ol_rx_deliver */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
#include <ol_rx_pn.h>           /* ol_rx_pn_check, etc. */
#include <ol_rx_fwd.h>          /* ol_rx_fwd_check, etc. */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
#include <ol_rx_reorder.h>
#include <ol_tx_send.h>         /* ol_tx_discard_target_frms */
#include <ol_tx_desc.h>         /* ol_tx_desc_frame_free */
#include <ol_tx_queue.h>
#include <ol_tx_sched.h>        /* ol_tx_sched_attach, etc. */
#include <ol_txrx.h>
#include <ol_txrx_types.h>
#include <cdp_txrx_flow_ctrl_legacy.h>
#include <cdp_txrx_bus.h>
#include <cdp_txrx_ipa.h>
#include <cdp_txrx_pmf.h>
#include "wma.h"
#include "hif.h"
#include "hif_main.h"
#include <cdp_txrx_peer_ops.h>
#ifndef REMOVE_PKT_LOG
#include "pktlog_ac.h"
#endif
#include <wlan_policy_mgr_api.h>
#include "epping_main.h"
#include <a_types.h>
#include <cdp_txrx_handle.h>
#include "wlan_qct_sys.h"

#include <htt_internal.h>
#include <ol_txrx_ipa.h>
#include "wlan_roam_debug.h"
#include "cfg_ucfg_api.h"


#define DPT_DEBUGFS_PERMS	(QDF_FILE_USR_READ | \
				 QDF_FILE_USR_WRITE | \
				 QDF_FILE_GRP_READ | \
				 QDF_FILE_OTH_READ)
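/*
 * Note (explanatory, not required by the code): the flag combination above
 * corresponds to a conventional 0644 debugfs entry - read/write for the
 * owner, read-only for group and others.
 */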

#define DPT_DEBUGFS_NUMBER_BASE	10
/**
 * enum dpt_set_param_debugfs - dpt set params
 * @DPT_SET_PARAM_PROTO_BITMAP: set proto bitmap
 * @DPT_SET_PARAM_NR_RECORDS: set num of records
 * @DPT_SET_PARAM_VERBOSITY: set verbosity
 */
enum dpt_set_param_debugfs {
	DPT_SET_PARAM_PROTO_BITMAP = 1,
	DPT_SET_PARAM_NR_RECORDS = 2,
	DPT_SET_PARAM_VERBOSITY = 3,
	DPT_SET_PARAM_MAX,
};

QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
				     uint8_t *peer_mac,
				     enum ol_txrx_peer_state state);
static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *vdev, bool val);
int ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
extern void
ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
		      struct ol_tx_wmm_param_t wmm_param);

extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
				uint64_t **last_pn, uint32_t **rmf_pn_replays);

/* thresh for peer's cached buf queue beyond which the elements are dropped */
#define OL_TXRX_CACHED_BUFQ_THRESH 128

/**
 * ol_tx_mark_first_wakeup_packet() - set a flag to indicate that the FW
 *    supports marking the first packet after WoW wakeup
 * @value: 1 for enabled, 0 for disabled
 *
 * Return: None
 */
static void ol_tx_mark_first_wakeup_packet(uint8_t value)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
}

/**
 * ol_tx_set_is_mgmt_over_wmi_enabled() - set a flag to indicate whether
 *    management frames over WMI are enabled
 * @value: 1 for enabled, 0 for disabled
 *
 * Return: None
 */
void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("pdev is NULL");
		return;
	}
	pdev->is_mgmt_over_wmi_enabled = value;
}

/**
 * ol_tx_get_is_mgmt_over_wmi_enabled() - get the value of
 *    is_mgmt_over_wmi_enabled
 *
 * Return: is_mgmt_over_wmi_enabled
 */
uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("pdev is NULL");
		return 0;
	}
	return pdev->is_mgmt_over_wmi_enabled;
}

#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
static void *
ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
				   struct cdp_vdev *pvdev,
				   uint8_t *peer_addr, uint8_t *peer_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	return peer;
}

static QDF_STATUS ol_txrx_get_vdevid(void *ppeer, uint8_t *vdev_id)
{
	struct ol_txrx_peer_t *peer = ppeer;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "peer argument is null!!");
		return QDF_STATUS_E_FAILURE;
	}

	*vdev_id = peer->vdev->vdev_id;
	return QDF_STATUS_SUCCESS;
}

static struct cdp_vdev *ol_txrx_get_vdev_by_sta_id(struct cdp_pdev *ppdev,
						   uint8_t sta_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_peer_t *peer = NULL;
	ol_txrx_vdev_handle vdev;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PDEV not found for sta_id [%d]", sta_id);
		return NULL;
	}

	peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev, sta_id,
						PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PEER [%d] not found", sta_id);
		return NULL;
	}

	vdev = peer->vdev;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);

	return (struct cdp_vdev *)vdev;
}

/**
 * ol_txrx_find_peer_by_addr() - find peer via peer mac addr and peer_id
 * @ppdev: pointer of type cdp_pdev
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds a peer with the given mac address and returns its
 * peer_id.
 * Note that this function does not increment the peer->ref_cnt.
 * This means that the peer may be deleted in some other parallel context after
 * it has been found.
 *
 * Return: peer handle if peer is found, NULL if peer is not found.
 */
void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
				uint8_t *peer_addr,
				uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
						    PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	return peer;
}

/**
 * ol_txrx_peer_get_ref_by_addr() - get peer ref via peer mac addr and peer_id
 * @pdev: pointer of type ol_txrx_pdev_handle
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds the peer with the given mac address and returns its
 * peer_id.
 * Note that this function increments the peer->ref_cnt.
 * This makes sure that the peer will be valid. This also means the caller
 * needs to call the corresponding API - ol_txrx_peer_release_ref - to delete
 * the peer reference.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr, peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
						 u8 *peer_addr,
						 u8 *peer_id,
						 enum peer_debug_id_type dbg_id)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
						    dbg_id);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	return peer;
}

static uint16_t ol_txrx_local_peer_id(void *ppeer)
{
	ol_txrx_peer_handle peer = ppeer;

	return peer->local_id;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @return handle to the txrx peer object
 */
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *ppdev,
			      uint8_t local_peer_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_peer_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	return peer;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID, taking a reference
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @dbg_id - debug_id to track caller
 * @return handle to the txrx peer object
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 *  Note that this function increments the peer->ref_cnt.
 *  This makes sure that the peer will be valid. This also means the caller
 *  needs to call the corresponding API - ol_txrx_peer_release_ref - to drop
 *  the peer reference.
 *  Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_local_id(pdev, local_peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
				 uint8_t local_peer_id,
				 enum peer_debug_id_type dbg_id)
{
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_peer_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	if (peer && peer->valid)
		ol_txrx_peer_get_ref(peer, dbg_id);
	else
		peer = NULL;
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);

	return peer;
}

static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
}
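
/*
 * Illustration (example values only, not part of the driver logic): with,
 * say, 4 local peer IDs, the pool_init() above leaves freelist = 0 and
 * pool = {1, 2, 3, 4, 4}, i.e. a singly linked free list 0 -> 1 -> 2 -> 3
 * terminated by the end marker pool[4] == 4.  _alloc() below pops the head
 * (peer->local_id = freelist; freelist = pool[head]) and _free() pushes the
 * released ID back onto the head, so both operations are O(1) under
 * local_peer_ids.lock.
 */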

static void
ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}
	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
}

#else
#define ol_txrx_local_peer_id_pool_init(pdev)   /* no-op */
#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
#define ol_txrx_local_peer_id_free(pdev, peer)  /* no-op */
#define ol_txrx_local_peer_id_cleanup(pdev)     /* no-op */
#endif

#if defined(CONFIG_DP_TRACE) && defined(WLAN_DEBUGFS)
/**
 * ol_txrx_read_dpt_buff_debugfs() - read dp trace buffer
 * @file: file to read
 * @arg: pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_read_dpt_buff_debugfs(qdf_debugfs_file_t file,
						void *arg)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)arg;
	uint32_t i = 0;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID)
		return QDF_STATUS_E_INVAL;
	else if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE) {
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
		return QDF_STATUS_SUCCESS;
	}

	i = qdf_dpt_get_curr_pos_debugfs(file, pdev->state);
	status = qdf_dpt_dump_stats_debugfs(file, i);
	if (status == QDF_STATUS_E_FAILURE)
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS;
	else if (status == QDF_STATUS_SUCCESS)
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE;

	return status;
}

/**
 * ol_txrx_conv_str_to_int_debugfs() - convert string to int
 * @buf: buffer containing string
 * @len: buffer len
 * @proto_bitmap: defines the protocol to be tracked
 * @nr_records: defines the nth packet which is traced
 * @verbosity: defines the verbosity level
 *
 * This function expects char buffer to be null terminated.
 * Otherwise results could be unexpected values.
 *
 * Return: 0 on success
 */
static int ol_txrx_conv_str_to_int_debugfs(char *buf, qdf_size_t len,
					   int *proto_bitmap,
					   int *nr_records,
					   int *verbosity)
{
	int num_value = DPT_SET_PARAM_PROTO_BITMAP;
	int ret, param_value = 0;
	char *buf_param = buf;
	int i;

	for (i = 1; i < DPT_SET_PARAM_MAX; i++) {
		/* Loop till you reach space as kstrtoint operates till
		 * null character. Replace space with null character
		 * to read each value.
		 * terminate the loop either at null terminated char or
		 * len is 0.
		 */
		while (*buf && len) {
			if (*buf == ' ') {
				*buf = '\0';
				buf++;
				len--;
				break;
			}
			buf++;
			len--;
		}
		/* get the parameter */
		ret = qdf_kstrtoint(buf_param,
				    DPT_DEBUGFS_NUMBER_BASE,
				    &param_value);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s: Error while parsing buffer. ret %d",
				  __func__, ret);
			return ret;
		}
		switch (num_value) {
		case DPT_SET_PARAM_PROTO_BITMAP:
			*proto_bitmap = param_value;
			break;
		case DPT_SET_PARAM_NR_RECORDS:
			*nr_records = param_value;
			break;
		case DPT_SET_PARAM_VERBOSITY:
			*verbosity = param_value;
			break;
		default:
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s %d: :Set command needs exactly 3 arguments in format <proto_bitmap> <number of record> <Verbosity>.",
				  __func__, __LINE__);
			break;
		}
		num_value++;
		/* buf_param should now point to the next param value. */
		buf_param = buf;
	}

	/* buf is not yet NULL implies more than 3 params are passed. */
	if (*buf) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d: :Set command needs exactly 3 arguments in format <proto_bitmap> <number of record> <Verbosity>.",
			  __func__, __LINE__);
		return -EINVAL;
	}
	return 0;
}
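
/*
 * Worked example (illustrative values only): a write of the string
 * "4369 3 2" is tokenized above by replacing each space with '\0', so
 * qdf_kstrtoint() sees "4369", then "3", then "2", which end up as
 * proto_bitmap = 4369, nr_records = 3 and verbosity = 2.  A fourth token
 * would leave *buf pointing at a non-NUL character after the loop, and
 * the write is rejected with -EINVAL.
 */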

/**
 * ol_txrx_write_dpt_buff_debugfs() - set dp trace parameters
 * @priv: pdev object
 * @buf: buff to get value for dpt parameters
 * @len: buf length
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_write_dpt_buff_debugfs(void *priv,
						 const char *buf,
						 qdf_size_t len)
{
	int ret;
	int proto_bitmap = 0;
	int nr_records = 0;
	int verbosity = 0;
	char *buf1 = NULL;

	if (!buf || !len) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: null buffer or len. len %u",
			  __func__, (uint8_t)len);
		return QDF_STATUS_E_FAULT;
	}

	buf1 = (char *)qdf_mem_malloc(len);
	if (!buf1)
		return QDF_STATUS_E_FAULT;

	qdf_mem_copy(buf1, buf, len);
	ret = ol_txrx_conv_str_to_int_debugfs(buf1, len, &proto_bitmap,
					      &nr_records, &verbosity);
	if (ret) {
		qdf_mem_free(buf1);
		return QDF_STATUS_E_INVAL;
	}

	qdf_dpt_set_value_debugfs(proto_bitmap, nr_records, verbosity);
	qdf_mem_free(buf1);
	return QDF_STATUS_SUCCESS;
}

static int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
	pdev->dpt_debugfs_fops.show = ol_txrx_read_dpt_buff_debugfs;
	pdev->dpt_debugfs_fops.write = ol_txrx_write_dpt_buff_debugfs;
	pdev->dpt_debugfs_fops.priv = pdev;

	pdev->dpt_stats_log_dir = qdf_debugfs_create_dir("dpt_stats", NULL);

	if (!pdev->dpt_stats_log_dir) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: error while creating debugfs dir for %s",
			  __func__, "dpt_stats");
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
		return -EBUSY;
	}

	if (!qdf_debugfs_create_file("dump_set_dpt_logs", DPT_DEBUGFS_PERMS,
				     pdev->dpt_stats_log_dir,
				     &pdev->dpt_debugfs_fops)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: debug Entry creation failed!",
			  __func__);
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
		return -EBUSY;
	}

	pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
	return 0;
}
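
/*
 * Usage sketch (illustrative; the exact debugfs mount point and parent
 * directory depend on the platform): the node created above is
 * <debugfs>/dpt_stats/dump_set_dpt_logs.  Writing three base-10 integers,
 * e.g.
 *     echo "4369 3 2" > <debugfs>/dpt_stats/dump_set_dpt_logs
 * sets <proto_bitmap> <number of records> <verbosity> via the write handler
 * above, while reading the same node (e.g. with cat) dumps the collected
 * DP trace records through ol_txrx_read_dpt_buff_debugfs().
 */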

static void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
	qdf_debugfs_remove_dir_recursive(pdev->dpt_stats_log_dir);
}
#else
static inline int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
	return 0;
}

static inline void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
}
#endif

/**
 * ol_txrx_pdev_attach() - allocate txrx pdev
 * @ctrl_pdev: cfg pdev
 * @htc_pdev: HTC pdev
 * @osdev: os dev
 *
 * Return: txrx pdev handle
 *	   NULL for failure
 */
static struct cdp_pdev *
ol_txrx_pdev_attach(ol_txrx_soc_handle soc,
		    struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
		    HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
{
	struct ol_txrx_pdev_t *pdev;
	struct cdp_cfg *cfg_pdev = (struct cdp_cfg *)ctrl_pdev;
	int i, tid;

	pdev = qdf_mem_malloc(sizeof(*pdev));
	if (!pdev)
		goto fail0;

	/* init LL/HL cfg here */
	pdev->cfg.is_high_latency = ol_cfg_is_high_latency(cfg_pdev);
	/*
	 * Credit reporting through HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND
	 * enabled or not.
	 */
	pdev->cfg.credit_update_enabled =
		ol_cfg_is_credit_update_enabled(cfg_pdev);

	/* Explicitly request TX Completions from FW */
	pdev->cfg.request_tx_comp = cds_is_ptp_rx_opt_enabled() ||
		cds_is_packet_log_enabled();

	pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(cfg_pdev);

	/* store provided params */
	pdev->ctrl_pdev = cfg_pdev;
	pdev->osdev = osdev;

	for (i = 0; i < htt_num_sec_types; i++)
		pdev->sec_types[i] = (enum ol_sec_type)i;

	TXRX_STATS_INIT(pdev);
	ol_txrx_tso_stats_init(pdev);
	ol_txrx_fw_stats_desc_pool_init(pdev, FW_STATS_DESC_POOL_SIZE);

	TAILQ_INIT(&pdev->vdev_list);

	TAILQ_INIT(&pdev->req_list);
	pdev->req_list_depth = 0;
	qdf_spinlock_create(&pdev->req_list_spinlock);
	qdf_spinlock_create(&pdev->tx_mutex);

	/* do initial set up of the peer ID -> peer object lookup map */
	if (ol_txrx_peer_find_attach(pdev))
		goto fail1;

	/* initialize the counter of the target's tx buffer availability */
	qdf_atomic_init(&pdev->target_tx_credit);
	qdf_atomic_init(&pdev->orig_target_tx_credit);

	if (ol_cfg_is_high_latency(cfg_pdev)) {
		qdf_spinlock_create(&pdev->tx_queue_spinlock);
		pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
		if (pdev->tx_sched.scheduler == NULL)
			goto fail2;
	}
	ol_txrx_pdev_txq_log_init(pdev);
	ol_txrx_pdev_grp_stats_init(pdev);

	pdev->htt_pdev =
		htt_pdev_alloc(pdev, cfg_pdev, htc_pdev, osdev);
	if (!pdev->htt_pdev)
		goto fail3;

	htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
					  ol_rx_pkt_dump_call);

	/*
	 * Init the tid --> category table.
	 * Regular tids (0-15) map to their AC.
	 * Extension tids get their own categories.
	 */
	for (tid = 0; tid < OL_TX_NUM_QOS_TIDS; tid++) {
		int ac = TXRX_TID_TO_WMM_AC(tid);

		pdev->tid_to_ac[tid] = ac;
	}
	pdev->tid_to_ac[OL_TX_NON_QOS_TID] =
		OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;
	pdev->tid_to_ac[OL_TX_MGMT_TID] =
		OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
	pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST] =
		OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
	pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
		OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;

	ol_txrx_debugfs_init(pdev);

	return (struct cdp_pdev *)pdev;

fail3:
	ol_txrx_peer_find_detach(pdev);

fail2:
	if (ol_cfg_is_high_latency(cfg_pdev))
		qdf_spinlock_destroy(&pdev->tx_queue_spinlock);

fail1:
	qdf_spinlock_destroy(&pdev->tx_mutex);
	ol_txrx_tso_stats_deinit(pdev);
	ol_txrx_fw_stats_desc_pool_deinit(pdev);
	qdf_mem_free(pdev);

fail0:
	return NULL;
}

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
/**
 * htt_pkt_log_init() - API to initialize packet log
 * @handle: pdev handle
 * @scn: HIF context
 *
 * Return: void
 */
void htt_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
	struct ol_txrx_pdev_t *handle = (struct ol_txrx_pdev_t *)ppdev;

	if (handle->pkt_log_init)
		return;

	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
	    !QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
		pktlog_sethandle(&handle->pl_dev, scn);
		pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
		if (pktlogmod_init(scn))
			qdf_print(" pktlogmod_init failed");
		else
			handle->pkt_log_init = true;
	}
}

/**
 * htt_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 * @scn: HIF Context
 *
 * Return: void
 */
static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
{
	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
	    !QDF_IS_EPPING_ENABLED(cds_get_conparam()) &&
	    handle->pkt_log_init) {
		pktlogmod_exit(handle);
		handle->pkt_log_init = false;
	}
}

#else
void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *ol_sc) { }
static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
#endif

/**
 * ol_txrx_pdev_post_attach() - attach txrx pdev
 * @pdev: txrx pdev
 *
 * Return: 0 for success
 */
int
ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	uint16_t i;
	uint16_t fail_idx = 0;
	int ret = 0;
	uint16_t desc_pool_size;
	struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);

	uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
	union ol_tx_desc_list_elem_t *c_element;
	unsigned int sig_bit;
	uint16_t desc_per_page;

	if (!osc) {
		ret = -EINVAL;
		goto ol_attach_fail;
	}

	/*
	 * For LL, limit the number of host's tx descriptors to match
	 * the number of target FW tx descriptors.
	 * This simplifies the FW, by ensuring the host will never
	 * download more tx descriptors than the target has space for.
	 * The FW will drop/free low-priority tx descriptors when it
	 * starts to run low, so that in theory the host should never
	 * run out of tx descriptors.
	 */

	/*
	 * LL - initialize the target credit ourselves.
	 * HL - wait for a HTT target credit initialization
	 * during htt_attach.
	 */
	desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
	ol_tx_init_pdev(pdev);

	ol_tx_desc_dup_detect_init(pdev, desc_pool_size);

	ol_tx_setup_fastpath_ce_handles(osc, pdev);

	if ((ol_txrx_get_new_htt_msg_format(pdev)))
		ol_set_cfg_new_htt_format(pdev->ctrl_pdev, true);
	else
		ol_set_cfg_new_htt_format(pdev->ctrl_pdev, false);

	ret = htt_attach(pdev->htt_pdev, desc_pool_size);
	if (ret)
		goto htt_attach_fail;

	/* Attach micro controller data path offload resource */
	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
		ret = htt_ipa_uc_attach(pdev->htt_pdev);
		if (ret)
			goto uc_attach_fail;
	}

	/* Calculate single element reserved size power of 2 */
	pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
		pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
	if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
	    (NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Page alloc fail");
		ret = -ENOMEM;
		goto page_alloc_fail;
	}
	desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
	pdev->tx_desc.offset_filter = desc_per_page - 1;
	/* Calculate page divider to find page number */
	sig_bit = 0;
	while (desc_per_page) {
		sig_bit++;
		desc_per_page = desc_per_page >> 1;
	}
	pdev->tx_desc.page_divider = (sig_bit - 1);
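	/*
	 * Worked example (illustrative numbers, not a requirement): if each
	 * page holds 64 descriptor elements, offset_filter = 63 (0x3f) and
	 * the loop above leaves sig_bit = 7, so page_divider = 6.  A
	 * descriptor id i then maps to page (i >> page_divider) at offset
	 * (i & offset_filter), so descriptor lookup needs only a shift and a
	 * mask instead of a divide.
	 */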
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
		  pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
		  desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
		  pdev->tx_desc.desc_pages.num_element_per_page);

	/*
	 * Each SW tx desc (used only within the tx datapath SW) has a
	 * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
	 * Go ahead and allocate the HTT tx desc and link it with the SW tx
	 * desc now, to avoid doing it during time-critical transmit.
	 */
	pdev->tx_desc.pool_size = desc_pool_size;
	pdev->tx_desc.freelist =
		(union ol_tx_desc_list_elem_t *)
		(*pdev->tx_desc.desc_pages.cacheable_pages);
	c_element = pdev->tx_desc.freelist;
	for (i = 0; i < desc_pool_size; i++) {
		void *htt_tx_desc;
		void *htt_frag_desc = NULL;
		qdf_dma_addr_t frag_paddr = 0;
		qdf_dma_addr_t paddr;

		if (i == (desc_pool_size - 1))
			c_element->next = NULL;
		else
			c_element->next = (union ol_tx_desc_list_elem_t *)
				ol_tx_desc_find(pdev, i + 1);

		htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
		if (!htt_tx_desc) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
				  "%s: failed to alloc HTT tx desc (%d of %d)",
				  __func__, i, desc_pool_size);
			fail_idx = i;
			ret = -ENOMEM;
			goto desc_alloc_fail;
		}

		c_element->tx_desc.htt_tx_desc = htt_tx_desc;
		c_element->tx_desc.htt_tx_desc_paddr = paddr;
		ret = htt_tx_frag_alloc(pdev->htt_pdev,
					i, &frag_paddr, &htt_frag_desc);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s: failed to alloc HTT frag dsc (%d/%d)",
				  __func__, i, desc_pool_size);
			/* Is there a leak here, is this handling correct? */
			fail_idx = i;
			goto desc_alloc_fail;
		}
		if (!ret && htt_frag_desc) {
			/*
			 * Initialize the first 6 words (TSO flags)
			 * of the frag descriptor
			 */
			memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
			c_element->tx_desc.htt_frag_desc = htt_frag_desc;
			c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
		}
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
		c_element->tx_desc.pkt_type = 0xff;
#ifdef QCA_COMPUTE_TX_DELAY
		c_element->tx_desc.entry_timestamp_ticks =
			0xffffffff;
#endif
#endif
		c_element->tx_desc.id = i;
		qdf_atomic_init(&c_element->tx_desc.ref_cnt);
		c_element = c_element->next;
		fail_idx = i;
	}

	/* link SW tx descs into a freelist */
	pdev->tx_desc.num_free = desc_pool_size;
	ol_txrx_dbg("first tx_desc:0x%pK Last tx desc:0x%pK",
		    (uint32_t *)pdev->tx_desc.freelist,
		    (uint32_t *)(pdev->tx_desc.freelist + desc_pool_size));

	/* check what format of frames are expected to be delivered by the OS */
	pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
	if (pdev->frame_format == wlan_frm_fmt_native_wifi)
		pdev->htt_pkt_type = htt_pkt_type_native_wifi;
	else if (pdev->frame_format == wlan_frm_fmt_802_3) {
		if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
			pdev->htt_pkt_type = htt_pkt_type_eth2;
		else
			pdev->htt_pkt_type = htt_pkt_type_ethernet;
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s Invalid standard frame type: %d",
			  __func__, pdev->frame_format);
		ret = -EINVAL;
		goto control_init_fail;
	}

	/* setup the global rx defrag waitlist */
	TAILQ_INIT(&pdev->rx.defrag.waitlist);

	/* configure where defrag timeout and duplicate detection is handled */
	pdev->rx.flags.defrag_timeout_check =
		pdev->rx.flags.dup_check =
		ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* Need to revisit this part. Currently, hardcoded to Riva's caps */
	pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
	pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
	/*
	 * The Riva HW de-aggregate doesn't have capability to generate 802.11
	 * header for non-first subframe of A-MSDU.
	 */
	pdev->sw_subfrm_hdr_recovery_enable = 1;
	/*
	 * The Riva HW doesn't have the capability to set Protected Frame bit
	 * in the MAC header for encrypted data frame.
	 */
	pdev->sw_pf_proc_enable = 1;

	if (pdev->frame_format == wlan_frm_fmt_802_3) {
		/*
		 * sw llc process is only needed in
		 * 802.3 to 802.11 transform case
		 */
		pdev->sw_tx_llc_proc_enable = 1;
		pdev->sw_rx_llc_proc_enable = 1;
	} else {
		pdev->sw_tx_llc_proc_enable = 0;
		pdev->sw_rx_llc_proc_enable = 0;
	}

	switch (pdev->frame_format) {
	case wlan_frm_fmt_raw:
		pdev->sw_tx_encap =
			pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
			? 0 : 1;
		pdev->sw_rx_decap =
			pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
			? 0 : 1;
		break;
	case wlan_frm_fmt_native_wifi:
		pdev->sw_tx_encap =
			pdev->
			target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
			? 0 : 1;
		pdev->sw_rx_decap =
			pdev->
			target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
			? 0 : 1;
		break;
	case wlan_frm_fmt_802_3:
		pdev->sw_tx_encap =
			pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
			? 0 : 1;
		pdev->sw_rx_decap =
			pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
			? 0 : 1;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
			  pdev->frame_format,
			  pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
		ret = -EINVAL;
		goto control_init_fail;
	}
#endif

	/*
	 * Determine what rx processing steps are done within the host.
	 * Possibilities:
	 * 1.  Nothing - rx->tx forwarding and rx PN entirely within target.
	 *     (This is unlikely; even if the target is doing rx->tx
	 *     forwarding, the host should be doing rx->tx forwarding too,
	 *     as a back up for the target's rx->tx forwarding, in case the
	 *     target runs short on memory, and can't store rx->tx frames
	 *     that are waiting for missing prior rx frames to arrive.)
	 * 2.  Just rx -> tx forwarding.
	 *     This is the typical configuration for HL, and a likely
	 *     configuration for LL STA or small APs (e.g. retail APs).
	 * 3.  Both PN check and rx -> tx forwarding.
	 *     This is the typical configuration for large LL APs.
	 *     Host-side PN check without rx->tx forwarding is not a valid
	 *     configuration, since the PN check needs to be done prior to
	 *     the rx->tx forwarding.
	 */
	if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
		/*
		 * PN check, rx-tx forwarding and rx reorder is done by
		 * the target
		 */
		if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
			pdev->rx_opt_proc = ol_rx_in_order_deliver;
		else
			pdev->rx_opt_proc = ol_rx_fwd_check;
	} else {
		if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
			if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
				/*
				 * PN check done on host,
				 * rx->tx forwarding not done at all.
				 */
				pdev->rx_opt_proc = ol_rx_pn_check_only;
			} else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
				/*
				 * Both PN check and rx->tx forwarding done
				 * on host.
				 */
				pdev->rx_opt_proc = ol_rx_pn_check;
			} else {
#define TRACESTR01 "invalid config: if rx PN check is on the host,"\
" rx->tx forwarding check needs to also be on the host"
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301131 QDF_TRACE(QDF_MODULE_ID_TXRX,
1132 QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001133 "%s: %s", __func__, TRACESTR01);
1134#undef TRACESTR01
Yun Parkf01f6e22017-01-18 17:27:02 -08001135 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001136 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001137 }
1138 } else {
1139 /* PN check done on target */
1140 if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
1141 ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1142 /*
1143 * rx->tx forwarding done on host (possibly as
1144 * back-up for target-side primary rx->tx
1145 * forwarding)
1146 */
1147 pdev->rx_opt_proc = ol_rx_fwd_check;
1148 } else {
Yun Parkeaea8632017-04-09 09:53:45 -07001149 /*
1150 * rx->tx forwarding either done in target,
1151 * or not done at all
1152 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001153 pdev->rx_opt_proc = ol_rx_deliver;
1154 }
1155 }
1156 }
1157
1158 /* initialize mutexes for tx desc alloc and peer lookup */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301159 qdf_spinlock_create(&pdev->peer_ref_mutex);
1160 qdf_spinlock_create(&pdev->rx.mutex);
1161 qdf_spinlock_create(&pdev->last_real_peer_mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001162 qdf_spinlock_create(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001163 OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
1164
Yun Parkf01f6e22017-01-18 17:27:02 -08001165 if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK) {
1166 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001167 goto reorder_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001168 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001169
Yun Parkf01f6e22017-01-18 17:27:02 -08001170 if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK) {
1171 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001172 goto pn_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001173 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001174
1175#ifdef PERE_IP_HDR_ALIGNMENT_WAR
1176 pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
1177#endif
1178
1179 /*
1180 * WDI event attach
1181 */
1182 wdi_event_attach(pdev);
1183
1184 /*
1185 * Initialize rx PN check characteristics for different security types.
1186 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301187 qdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001188
1189 /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
1190 pdev->rx_pn[htt_sec_type_tkip].len =
1191 pdev->rx_pn[htt_sec_type_tkip_nomic].len =
1192 pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
1193 pdev->rx_pn[htt_sec_type_tkip].cmp =
1194 pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
1195 pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
1196
1197 /* WAPI: 128-bit PN */
1198 pdev->rx_pn[htt_sec_type_wapi].len = 128;
1199 pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
1200
1201 OL_RX_REORDER_TIMEOUT_INIT(pdev);
1202
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001203 ol_txrx_dbg("Created pdev %pK\n", pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001204
1205 pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
1206
1207#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
1208#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
1209
1210/* #if 1 -- TODO: clean this up */
1211#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
1212 /* avg = 100% * new + 0% * old */ \
1213 (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
1214/*
Yun Parkeaea8632017-04-09 09:53:45 -07001215 * #else
1216 * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
1217 * //avg = 25% * new + 25% * old
1218 * (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
1219 * #endif
1220 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001221 pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
1222 pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
1223#endif
1224
1225 ol_txrx_local_peer_id_pool_init(pdev);
1226
1227 pdev->cfg.ll_pause_txq_limit =
1228 ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
1229
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301230 /* TX flow control for peer who is in very bad link status */
1231 ol_tx_badpeer_flow_cl_init(pdev);
1232
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001233#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301234 qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301235 qdf_spinlock_create(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001236
1237 /* initialize compute interval with 5 seconds (ESE default) */
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301238 pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001239 {
1240 uint32_t bin_width_1000ticks;
Yun Parkeaea8632017-04-09 09:53:45 -07001241
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001242 bin_width_1000ticks =
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301243 qdf_system_msecs_to_ticks
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001244 (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
1245 * 1000);
1246 /*
1247 * Compute a factor and shift that together are equal to the
1248 * inverse of the bin_width time, so that rather than dividing
1249 * by the bin width time, approximately the same result can be
1250 * obtained much more efficiently by a multiply + shift.
1251 * multiply_factor >> shift = 1 / bin_width_time, so
1252 * multiply_factor = (1 << shift) / bin_width_time.
1253 *
1254 * Pick the shift semi-arbitrarily.
1255 * If we knew statically what the bin_width would be, we could
1256 * choose a shift that minimizes the error.
1257 * Since the bin_width is determined dynamically, simply use a
1258 * shift that is about half of the uint32_t size. This should
1259 * result in a relatively large multiplier value, which
1260 * minimizes error from rounding the multiplier to an integer.
1261 * The rounding error only becomes significant if the tick units
1262 * are on the order of 1 microsecond. In most systems, it is
1263 * expected that the tick units will be relatively low-res,
1264 * on the order of 1 millisecond. In such systems the rounding
1265 * error is negligible.
1266 * It would be more accurate to dynamically try out different
1267 * shifts and choose the one that results in the smallest
1268 * rounding error, but that extra level of fidelity is
1269 * not needed.
1270 */
1271 pdev->tx_delay.hist_internal_bin_width_shift = 16;
1272 pdev->tx_delay.hist_internal_bin_width_mult =
1273 ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
1274 1000 + (bin_width_1000ticks >> 1)) /
1275 bin_width_1000ticks;
1276 }
1277#endif /* QCA_COMPUTE_TX_DELAY */
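	/*
	 * Worked example for the multiply + shift approximation above
	 * (illustrative only; the 1 ms tick and 10 ms bin width are assumed
	 * values, not taken from any particular build): with 1 ms ticks and
	 * a 10 ms histogram bin width, bin_width_1000ticks = 10000 and
	 * shift = 16, so
	 *   mult = ((1 << 16) * 1000 + 5000) / 10000 = 6554
	 * and a delay of D ticks maps to (D * 6554) >> 16, which is
	 * approximately D / 10, i.e. the delay divided by the bin width.
	 */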
1278
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001279 /* Thermal Mitigation */
1280 ol_tx_throttle_init(pdev);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001281
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001282 ol_tso_seg_list_init(pdev, desc_pool_size);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001283
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301284 ol_tso_num_seg_list_init(pdev, desc_pool_size);
1285
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001286 ol_tx_register_flow_control(pdev);
1287
1288 return 0; /* success */
1289
Leo Chang376398b2015-10-23 14:19:02 -07001290pn_trace_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001291 OL_RX_REORDER_TRACE_DETACH(pdev);
1292
Leo Chang376398b2015-10-23 14:19:02 -07001293reorder_trace_attach_fail:
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301294 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1295 qdf_spinlock_destroy(&pdev->rx.mutex);
1296 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301297 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001298 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1299
Leo Chang376398b2015-10-23 14:19:02 -07001300control_init_fail:
1301desc_alloc_fail:
1302 for (i = 0; i < fail_idx; i++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001303 htt_tx_desc_free(pdev->htt_pdev,
Leo Chang376398b2015-10-23 14:19:02 -07001304 (ol_tx_desc_find(pdev, i))->htt_tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001305
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301306 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001307 &pdev->tx_desc.desc_pages, 0, true);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001308
Leo Chang376398b2015-10-23 14:19:02 -07001309page_alloc_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001310 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1311 htt_ipa_uc_detach(pdev->htt_pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001312uc_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001313 htt_detach(pdev->htt_pdev);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301314htt_attach_fail:
1315 ol_tx_desc_dup_detect_deinit(pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001316ol_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001317 return ret; /* fail */
1318}
1319
Dhanashri Atre12a08392016-02-17 13:10:34 -08001320/**
1321 * ol_txrx_pdev_attach_target() - send target configuration
1322 *
1323 * @ppdev - the physical device being initialized
1324 *
1325 * The majority of the data SW setup is done by the pdev_attach
1326 * functions, but this function completes the data SW setup by
1327 * sending datapath configuration messages to the target.
1328 *
1329 * Return: 0 - success, 1 - failure
1330 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001331static A_STATUS ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001332{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001333 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001334
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +05301335	return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0 : 1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001336}
1337
Dhanashri Atre12a08392016-02-17 13:10:34 -08001338/**
Mohit Khanna54f3a382017-03-13 17:56:32 -07001339 * ol_tx_free_descs_inuse - free tx descriptors which are in use
1340 * @pdev - the physical device for which tx descs need to be freed
1341 *
1342 * Cycle through the list of TX descriptors (for a pdev) which are in use,
1343 * i.e. for which TX completion has not been received, and free them. Should
1344 * be called only when interrupts are off and all lower-layer RX is stopped.
1345 * Otherwise there may be a race condition with TX completions.
1346 *
1347 * Return: None
1348 */
1349static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
1350{
1351 int i;
1352 void *htt_tx_desc;
1353 struct ol_tx_desc_t *tx_desc;
1354 int num_freed_tx_desc = 0;
1355
1356 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1357 tx_desc = ol_tx_desc_find(pdev, i);
1358 /*
1359 * Confirm that each tx descriptor is "empty", i.e. it has
1360 * no tx frame attached.
1361 * In particular, check that there are no frames that have
1362 * been given to the target to transmit, for which the
1363 * target has never provided a response.
1364 */
1365 if (qdf_atomic_read(&tx_desc->ref_cnt)) {
1366 ol_txrx_dbg("Warning: freeing tx frame (no compltn)");
1367 ol_tx_desc_frame_free_nonstd(pdev,
1368 tx_desc, 1);
1369 num_freed_tx_desc++;
1370 }
1371 htt_tx_desc = tx_desc->htt_tx_desc;
1372 htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
1373 }
1374
1375 if (num_freed_tx_desc)
1376 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1377 "freed %d tx frames for which no resp from target",
1378 num_freed_tx_desc);
1379
1380}
1381
1382/**
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301383 * ol_txrx_pdev_pre_detach() - detach the data SW state
Dhanashri Atre12a08392016-02-17 13:10:34 -08001384 * @ppdev - the data physical device object being removed
1385 * @force - delete the pdev (and its vdevs and peers) even if
1386 * there are outstanding references by the target to the vdevs
1387 * and peers within the pdev
1388 *
1389 * This function is used when the WLAN driver is being removed to
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301390 * detach the host data component within the driver.
Dhanashri Atre12a08392016-02-17 13:10:34 -08001391 *
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301392 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08001393 */
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301394static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001395{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001396 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001397
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001398 /* preconditions */
1399 TXRX_ASSERT2(pdev);
1400
1401 /* check that the pdev has no vdevs allocated */
1402 TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
1403
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001404#ifdef QCA_SUPPORT_TX_THROTTLE
1405 /* Thermal Mitigation */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301406 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
1407 qdf_timer_free(&pdev->tx_throttle.phase_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001408#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301409 qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1410 qdf_timer_free(&pdev->tx_throttle.tx_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001411#endif
1412#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001413
1414 if (force) {
1415 /*
1416 * The assertion above confirms that all vdevs within this pdev
1417 * were detached. However, they may not have actually been
1418 * deleted.
1419 * If the vdev had peers which never received a PEER_UNMAP msg
1420 * from the target, then there are still zombie peer objects,
1421 * and the vdev parents of the zombie peers are also zombies,
1422 * hanging around until their final peer gets deleted.
1423 * Go through the peer hash table and delete any peers left.
1424 * As a side effect, this will complete the deletion of any
1425 * vdevs that are waiting for their peers to finish deletion.
1426 */
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001427 ol_txrx_dbg("Force delete for pdev %pK\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001428 pdev);
1429 ol_txrx_peer_find_hash_erase(pdev);
1430 }
1431
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301432 /* to get flow pool status before freeing descs */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07001433 ol_tx_dump_flow_pool_info((void *)pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07001434 ol_tx_free_descs_inuse(pdev);
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301435 ol_tx_deregister_flow_control(pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07001436
1437 /*
1438	 * ol_tso_seg_list_deinit should happen after ol_tx_free_descs_inuse,
1439	 * since ol_tx_free_descs_inuse accesses the tso seg freelist that is
1440	 * de-initialized by ol_tso_seg_list_deinit
1441 */
1442 ol_tso_seg_list_deinit(pdev);
1443 ol_tso_num_seg_list_deinit(pdev);
1444
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301445	/* First, stop the communication between HTT and the target */
1446 htt_detach_target(pdev->htt_pdev);
1447
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301448 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001449 &pdev->tx_desc.desc_pages, 0, true);
1450 pdev->tx_desc.freelist = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001451
1452 /* Detach micro controller data path offload resource */
1453 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1454 htt_ipa_uc_detach(pdev->htt_pdev);
1455
1456 htt_detach(pdev->htt_pdev);
Nirav Shah76291962016-04-25 10:50:37 +05301457 ol_tx_desc_dup_detect_deinit(pdev);
1458
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301459 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1460 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
1461 qdf_spinlock_destroy(&pdev->rx.mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001462 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001463#ifdef QCA_SUPPORT_TX_THROTTLE
1464 /* Thermal Mitigation */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301465 qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001466#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301467
1468	/* TX flow control for peers that are in very bad link status */
1469 ol_tx_badpeer_flow_cl_deinit(pdev);
1470
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001471 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1472
1473 OL_RX_REORDER_TRACE_DETACH(pdev);
1474 OL_RX_PN_TRACE_DETACH(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301475
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001476 /*
1477 * WDI event detach
1478 */
1479 wdi_event_detach(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301480
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001481 ol_txrx_local_peer_id_cleanup(pdev);
1482
1483#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301484 qdf_spinlock_destroy(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001485#endif
1486}
1487
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301488/**
1489 * ol_txrx_pdev_detach() - delete the data SW state
1490 * @ppdev - the data physical device object being removed
1491 * @force - delete the pdev (and its vdevs and peers) even if
1492 * there are outstanding references by the target to the vdevs
1493 * and peers within the pdev
1494 *
1495 * This function is used when the WLAN driver is being removed to
1496 * remove the host data component within the driver.
1497 * All virtual devices within the physical device need to be deleted
1498 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
1499 *
1500 * Return: None
1501 */
1502static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
1503{
1504 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Rakesh Pillai33942c42018-05-09 11:45:38 +05301505 struct ol_txrx_stats_req_internal *req, *temp_req;
tfyu9fcabd72017-09-26 17:46:48 +08001506 int i = 0;
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301507
1508	/* check to ensure the txrx pdev structure is not NULL */
1509 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05301510 ol_txrx_err("pdev is NULL");
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301511 return;
1512 }
1513
1514 htt_pktlogmod_exit(pdev);
1515
tfyu9fcabd72017-09-26 17:46:48 +08001516 qdf_spin_lock_bh(&pdev->req_list_spinlock);
1517 if (pdev->req_list_depth > 0)
1518 ol_txrx_err(
1519 "Warning: the txrx req list is not empty, depth=%d\n",
1520 pdev->req_list_depth
1521 );
Rakesh Pillai33942c42018-05-09 11:45:38 +05301522 TAILQ_FOREACH_SAFE(req, &pdev->req_list, req_list_elem, temp_req) {
tfyu9fcabd72017-09-26 17:46:48 +08001523 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
1524 pdev->req_list_depth--;
1525 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05301526 "%d: %pK,verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)\n",
tfyu9fcabd72017-09-26 17:46:48 +08001527 i++,
1528 req,
1529 req->base.print.verbose,
1530 req->base.print.concise,
1531 req->base.stats_type_upload_mask,
1532 req->base.stats_type_reset_mask
1533 );
1534 qdf_mem_free(req);
1535 }
1536 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
1537
1538 qdf_spinlock_destroy(&pdev->req_list_spinlock);
Ajit Pal Singh8184e932018-07-25 13:54:13 +05301539 qdf_spinlock_destroy(&pdev->tx_mutex);
tfyu9fcabd72017-09-26 17:46:48 +08001540
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301541 OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
1542
1543 if (pdev->cfg.is_high_latency)
1544 ol_tx_sched_detach(pdev);
1545
1546 htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
1547
1548 htt_pdev_free(pdev->htt_pdev);
1549 ol_txrx_peer_find_detach(pdev);
1550 ol_txrx_tso_stats_deinit(pdev);
jitiphil335d2412018-06-07 22:49:24 +05301551 ol_txrx_fw_stats_desc_pool_deinit(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301552
1553 ol_txrx_pdev_txq_log_destroy(pdev);
1554 ol_txrx_pdev_grp_stat_destroy(pdev);
Alok Kumarddd457e2018-04-09 13:51:42 +05301555
Rakshith Suresh Patkar44f6a8f2018-04-17 16:17:12 +05301556 ol_txrx_debugfs_exit(pdev);
1557
Alok Kumarddd457e2018-04-09 13:51:42 +05301558 qdf_mem_free(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301559}
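/*
 * Teardown order sketch (illustrative only; the actual sequencing is driven
 * by the cdp ops callers in the upper layers, not by this file):
 *
 *   ol_txrx_vdev_detach(vdev, cb, ctx);     // first, for every vdev
 *   ol_txrx_pdev_pre_detach(pdev, force);   // detach the data SW state
 *   ol_txrx_pdev_detach(pdev, force);       // then free the pdev itself
 */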
1560
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301561#if defined(QCA_HL_NETDEV_FLOW_CONTROL)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301562
1563/**
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301564 * ol_txrx_vdev_per_vdev_tx_desc_init() - initialise per vdev tx desc count
1565 * related variables.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301566 * @vdev: the virtual device object
1567 *
1568 * Return: None
1569 */
1570static inline void
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301571ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301572{
1573 qdf_atomic_init(&vdev->tx_desc_count);
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301574 vdev->tx_desc_limit = 0;
1575 vdev->queue_restart_th = 0;
1576 vdev->prio_q_paused = 0;
1577 vdev->queue_stop_th = 0;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301578}
1579#else
1580
1581static inline void
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301582ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301583{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301584}
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301585#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301586
Dhanashri Atre12a08392016-02-17 13:10:34 -08001587/**
1588 * ol_txrx_vdev_attach - Allocate and initialize the data object
1589 * for a new virtual device.
1590 *
1591 * @data_pdev - the physical device the virtual device belongs to
1592 * @vdev_mac_addr - the MAC address of the virtual device
1593 * @vdev_id - the ID used to identify the virtual device to the target
1594 * @op_mode - whether this virtual device is operating as an AP,
1595 * an IBSS, or a STA
1596 *
1597 * Return: success: handle to new data vdev object, failure: NULL
1598 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001599static struct cdp_vdev *
1600ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001601 uint8_t *vdev_mac_addr,
1602 uint8_t vdev_id, enum wlan_op_mode op_mode)
1603{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001604 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001605 struct ol_txrx_vdev_t *vdev;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001606 QDF_STATUS qdf_status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001607
1608 /* preconditions */
1609 TXRX_ASSERT2(pdev);
1610 TXRX_ASSERT2(vdev_mac_addr);
1611
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301612 vdev = qdf_mem_malloc(sizeof(*vdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001613 if (!vdev)
1614 return NULL; /* failure */
1615
1616 /* store provided params */
1617 vdev->pdev = pdev;
1618 vdev->vdev_id = vdev_id;
1619 vdev->opmode = op_mode;
1620
1621 vdev->delete.pending = 0;
1622 vdev->safemode = 0;
1623 vdev->drop_unenc = 1;
1624 vdev->num_filters = 0;
Himanshu Agarwal5ac2f7b2016-05-06 20:08:10 +05301625 vdev->fwd_tx_packets = 0;
1626 vdev->fwd_rx_packets = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001627
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301628 ol_txrx_vdev_per_vdev_tx_desc_init(vdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301629
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301630 qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001631 OL_TXRX_MAC_ADDR_LEN);
1632
1633 TAILQ_INIT(&vdev->peer_list);
1634 vdev->last_real_peer = NULL;
1635
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001636 ol_txrx_hl_tdls_flag_reset((struct cdp_vdev *)vdev, false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301637
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001638#ifdef QCA_IBSS_SUPPORT
1639 vdev->ibss_peer_num = 0;
1640 vdev->ibss_peer_heart_beat_timer = 0;
1641#endif
1642
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301643 ol_txrx_vdev_txqs_init(vdev);
1644
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301645 qdf_spinlock_create(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001646 vdev->ll_pause.paused_reason = 0;
1647 vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
1648 vdev->ll_pause.txq.depth = 0;
wadesong5e2e8012017-08-21 16:56:03 +08001649 qdf_atomic_init(&vdev->delete.detaching);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301650 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001651 &vdev->ll_pause.timer,
1652 ol_tx_vdev_ll_pause_queue_send, vdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301653 QDF_TIMER_TYPE_SW);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301654 qdf_atomic_init(&vdev->os_q_paused);
1655 qdf_atomic_set(&vdev->os_q_paused, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001656 vdev->tx_fl_lwm = 0;
1657 vdev->tx_fl_hwm = 0;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001658 vdev->rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001659 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Abhishek Singh217d9782017-04-28 23:49:11 +05301660 qdf_mem_zero(&vdev->last_peer_mac_addr,
1661 sizeof(union ol_txrx_align_mac_addr_t));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301662 qdf_spinlock_create(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001663 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08001664 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001665 vdev->osif_fc_ctx = NULL;
1666
Alok Kumar75355aa2018-03-19 17:32:58 +05301667 vdev->txrx_stats.txack_success = 0;
1668 vdev->txrx_stats.txack_failed = 0;
1669
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001670 /* Default MAX Q depth for every VDEV */
1671 vdev->ll_pause.max_q_depth =
1672 ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001673 qdf_status = qdf_event_create(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001674 /* add this vdev into the pdev's list */
1675 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
1676
Poddar, Siddarth14521792017-03-14 21:19:42 +05301677 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001678 "Created vdev %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001679 vdev,
1680 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
1681 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
1682 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
1683
1684 /*
1685 * We've verified that htt_op_mode == wlan_op_mode,
1686 * so no translation is needed.
1687 */
1688 htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
1689
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001690 return (struct cdp_vdev *)vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001691}
1692
Dhanashri Atre12a08392016-02-17 13:10:34 -08001693/**
1694 * ol_txrx_vdev_register - Link a vdev's data object with the
1695 * matching OS shim vdev object.
1696 *
1697 * @txrx_vdev: the virtual device's data object
1698 * @osif_vdev: the virtual device's OS shim object
Sravan Kumar Kairam43f191b2018-05-04 17:00:39 +05301699 * @ctrl_vdev: UMAC vdev objmgr handle
Dhanashri Atre12a08392016-02-17 13:10:34 -08001700 * @txrx_ops: (pointers to)functions used for tx and rx data xfer
1701 *
1702 * The data object for a virtual device is created by the
1703 * function ol_txrx_vdev_attach. However, rather than fully
1704 * linking the data vdev object with the vdev objects from the
1705 * other subsystems that the data vdev object interacts with,
1706 * the txrx_vdev_attach function focuses primarily on creating
1707 * the data vdev object. After the creation of both the data
1708 * vdev object and the OS shim vdev object, this
1709 * txrx_osif_vdev_attach function is used to connect the two
1710 * vdev objects, so the data SW can use the OS shim vdev handle
1711 * when passing rx data received by a vdev up to the OS shim.
1712 */
Sravan Kumar Kairam43f191b2018-05-04 17:00:39 +05301713static void ol_txrx_vdev_register(struct cdp_vdev *pvdev, void *osif_vdev,
1714 struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
1715 struct ol_txrx_ops *txrx_ops)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001716{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001717 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001718
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001719 if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05301720 qdf_print("vdev/txrx_ops is NULL!");
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001721 qdf_assert(0);
1722 return;
1723 }
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001724
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001725 vdev->osif_dev = osif_vdev;
Sravan Kumar Kairam43f191b2018-05-04 17:00:39 +05301726 vdev->ctrl_vdev = ctrl_vdev;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001727 vdev->rx = txrx_ops->rx.rx;
Poddar, Siddarth3906e172018-01-09 11:24:58 +05301728 vdev->stats_rx = txrx_ops->rx.stats_rx;
Alok Kumar4696fb02018-06-06 00:10:18 +05301729 vdev->tx_comp = txrx_ops->tx.tx_comp;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001730 txrx_ops->tx.tx = ol_tx_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001731}
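/*
 * Usage sketch for the attach/register pair above (illustrative only;
 * pdev, mac_addr, osif_dev, ctrl_vdev and osif_rx_cb are hypothetical
 * caller-side objects, and in the driver these entry points are reached
 * through the cdp ops tables rather than by direct calls, since the
 * functions are static):
 *
 *   struct ol_txrx_ops ops = { 0 };
 *   struct cdp_vdev *vdev;
 *
 *   vdev = ol_txrx_vdev_attach(pdev, mac_addr, vdev_id, op_mode);
 *   ops.rx.rx = osif_rx_cb;
 *   ol_txrx_vdev_register(vdev, osif_dev, ctrl_vdev, &ops);
 *   // ol_txrx_vdev_register fills in ops.tx.tx (ol_tx_data), which the
 *   // OS shim then uses to hand tx frames to the datapath.
 */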
1732
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001733void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
1734{
1735 vdev->safemode = val;
1736}
1737
Dhanashri Atre12a08392016-02-17 13:10:34 -08001738/**
1739 * ol_txrx_set_privacy_filters - set the privacy filter
1740 * @vdev - the data virtual device object
1741 * @filter - filters to be set
1742 * @num - the number of filters
1743 *
1744 * Rx related. Set the privacy filters. When rx packets, check
1745 * the ether type, filter type and packet type to decide whether
1746 * discard these packets.
1747 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08001748static void
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001749ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
1750 void *filters, uint32_t num)
1751{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301752 qdf_mem_copy(vdev->privacy_filters, filters,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001753 num * sizeof(struct privacy_exemption));
1754 vdev->num_filters = num;
1755}
1756
1757void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
1758{
1759 vdev->drop_unenc = val;
1760}
1761
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001762#if defined(CONFIG_HL_SUPPORT) || defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
gbian016a42e2017-03-01 18:49:11 +08001763
1764static void
1765ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
1766{
1767 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1768 int i;
1769 struct ol_tx_desc_t *tx_desc;
1770
1771 qdf_spin_lock_bh(&pdev->tx_mutex);
1772 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1773 tx_desc = ol_tx_desc_find(pdev, i);
1774 if (tx_desc->vdev == vdev)
1775 tx_desc->vdev = NULL;
1776 }
1777 qdf_spin_unlock_bh(&pdev->tx_mutex);
1778}
1779
1780#else
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001781#ifdef QCA_LL_TX_FLOW_CONTROL_V2
1782static void ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
1783{
1784 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1785 struct ol_tx_flow_pool_t *pool;
1786 int i;
1787 struct ol_tx_desc_t *tx_desc;
gbian016a42e2017-03-01 18:49:11 +08001788
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001789 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
1790 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1791 tx_desc = ol_tx_desc_find(pdev, i);
1792 if (!qdf_atomic_read(&tx_desc->ref_cnt))
1793 /* not in use */
1794 continue;
1795
1796 pool = tx_desc->pool;
1797 qdf_spin_lock_bh(&pool->flow_pool_lock);
1798 if (tx_desc->vdev == vdev)
1799 tx_desc->vdev = NULL;
1800 qdf_spin_unlock_bh(&pool->flow_pool_lock);
1801 }
1802 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
1803}
1804
1805#else
gbian016a42e2017-03-01 18:49:11 +08001806static void
1807ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
1808{
gbian016a42e2017-03-01 18:49:11 +08001809}
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001810#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
1811#endif /* CONFIG_HL_SUPPORT */
gbian016a42e2017-03-01 18:49:11 +08001812
Dhanashri Atre12a08392016-02-17 13:10:34 -08001813/**
1814 * ol_txrx_vdev_detach - Deallocate the specified data virtual
1815 * device object.
1816 * @data_vdev: data object for the virtual device in question
1817 * @callback: function to call (if non-NULL) once the vdev has
1818 * been wholly deleted
1819 * @callback_context: context to provide in the callback
1820 *
1821 * All peers associated with the virtual device need to be deleted
1822 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
1823 * However, for the peers to be fully deleted, the peer deletion has to
1824 * percolate through the target data FW and back up to the host data SW.
1825 * Thus, even though the host control SW may have issued a peer_detach
1826 * call for each of the vdev's peers, the peer objects may still be
1827 * allocated, pending removal of all references to them by the target FW.
1828 * In this case, though the vdev_detach function call will still return
1829 * immediately, the vdev itself won't actually be deleted, until the
1830 * deletions of all its peers complete.
1831 * The caller can provide a callback function pointer to be notified when
1832 * the vdev deletion actually happens - whether it's directly within the
1833 * vdev_detach call, or if it's deferred until all in-progress peer
1834 * deletions have completed.
1835 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08001836static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001837ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001838 ol_txrx_vdev_delete_cb callback, void *context)
1839{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001840 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
wadesong5e2e8012017-08-21 16:56:03 +08001841 struct ol_txrx_pdev_t *pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001842
1843 /* preconditions */
1844 TXRX_ASSERT2(vdev);
wadesong5e2e8012017-08-21 16:56:03 +08001845 pdev = vdev->pdev;
1846
1847 /* prevent anyone from restarting the ll_pause timer again */
1848 qdf_atomic_set(&vdev->delete.detaching, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001849
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301850 ol_txrx_vdev_tx_queue_free(vdev);
1851
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301852 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301853 qdf_timer_stop(&vdev->ll_pause.timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001854 vdev->ll_pause.is_q_timer_on = false;
1855 while (vdev->ll_pause.txq.head) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05301856 qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
Yun Parkeaea8632017-04-09 09:53:45 -07001857
Nirav Shahcbc6d722016-03-01 16:24:53 +05301858 qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
Nirav Shahcbc6d722016-03-01 16:24:53 +05301859 qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001860 vdev->ll_pause.txq.head = next;
1861 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301862 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
wadesong5e2e8012017-08-21 16:56:03 +08001863
1864	/* The ll_pause timer should be deleted without any locks held, and
1865	 * no timer function should execute after this point, because
1866	 * qdf_timer_free deletes the timer synchronously.
1867 */
1868 qdf_timer_free(&vdev->ll_pause.timer);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301869 qdf_spinlock_destroy(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001870
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301871 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001872 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08001873 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001874 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301875 qdf_spin_unlock_bh(&vdev->flow_control_lock);
1876 qdf_spinlock_destroy(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001877
1878 /* remove the vdev from its parent pdev's list */
1879 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
1880
1881 /*
1882 * Use peer_ref_mutex while accessing peer_list, in case
1883 * a peer is in the process of being removed from the list.
1884 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301885 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001886 /* check that the vdev has no peers allocated */
1887 if (!TAILQ_EMPTY(&vdev->peer_list)) {
1888 /* debug print - will be removed later */
Poddar, Siddarth14521792017-03-14 21:19:42 +05301889 ol_txrx_dbg(
Nirav Shah7c8c1712018-09-10 16:01:31 +05301890 "not deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
1891 vdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001892 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
1893 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
1894 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
1895 /* indicate that the vdev needs to be deleted */
1896 vdev->delete.pending = 1;
1897 vdev->delete.callback = callback;
1898 vdev->delete.context = context;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301899 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001900 return;
1901 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301902 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001903 qdf_event_destroy(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001904
Poddar, Siddarth14521792017-03-14 21:19:42 +05301905 ol_txrx_dbg(
Nirav Shah7c8c1712018-09-10 16:01:31 +05301906 "deleting vdev obj %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
1907 vdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001908 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
1909 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
1910 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
1911
1912 htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
1913
1914 /*
Yun Parkeaea8632017-04-09 09:53:45 -07001915	 * ol_tx_desc_free might access invalid (freed) vdev contents through
1916	 * a tx desc, since this vdev might be detached asynchronously by
1917	 * another thread.
1918	 *
1919	 * So, when detaching this vdev, go through the tx desc pool and set
1920	 * each corresponding tx desc's vdev pointer to NULL, and add a vdev
1921	 * check in ol_tx_desc_free to avoid a crash.
1922 *
1923 */
gbian016a42e2017-03-01 18:49:11 +08001924 ol_txrx_tx_desc_reset_vdev(vdev);
1925
1926 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001927 * Doesn't matter if there are outstanding tx frames -
1928 * they will be freed once the target sends a tx completion
1929 * message for them.
1930 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301931 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001932 if (callback)
1933 callback(context);
1934}
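/*
 * Illustrative sketch of the deferred-delete contract described above
 * (the callback and context names are hypothetical):
 *
 *   static void vdev_delete_done(void *ctx)
 *   {
 *           // runs either synchronously from ol_txrx_vdev_detach(), or
 *           // later, once the last pending peer deletion completes
 *   }
 *
 *   ol_txrx_vdev_detach(vdev, vdev_delete_done, my_ctx);
 */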
1935
1936/**
1937 * ol_txrx_flush_rx_frames() - flush cached rx frames
1938 * @peer: peer
1939 * @drop: set flag to drop frames
1940 *
1941 * Return: None
1942 */
1943void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301944 bool drop)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001945{
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001946 struct ol_txrx_cached_bufq_t *bufqi;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001947 struct ol_rx_cached_buf *cache_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301948 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001949 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001950
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301951 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
1952 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001953 return;
1954 }
1955
Dhanashri Atre182b0272016-02-17 15:35:07 -08001956 qdf_assert(peer->vdev);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301957 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001958 bufqi = &peer->bufq_info;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001959
Dhanashri Atre50141c52016-04-07 13:15:29 -07001960 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
Dhanashri Atre182b0272016-02-17 15:35:07 -08001961 data_rx = peer->vdev->rx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001962 else
1963 drop = true;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301964 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001965
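	/*
	 * Drain the cached rx queue one buffer at a time. The bufq_lock is
	 * dropped before each buffer is delivered (or freed) so the spinlock
	 * is not held across the OS shim rx callback, and is re-taken to
	 * fetch the next entry.
	 */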
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001966 qdf_spin_lock_bh(&bufqi->bufq_lock);
1967 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001968 typeof(*cache_buf), list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001969 while (!list_empty(&bufqi->cached_bufq)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001970 list_del(&cache_buf->list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001971 bufqi->curr--;
1972 qdf_assert(bufqi->curr >= 0);
1973 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001974 if (drop) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05301975 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001976 } else {
1977 /* Flush the cached frames to HDD */
Dhanashri Atre182b0272016-02-17 15:35:07 -08001978 ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301979 if (ret != QDF_STATUS_SUCCESS)
Nirav Shahcbc6d722016-03-01 16:24:53 +05301980 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001981 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301982 qdf_mem_free(cache_buf);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001983 qdf_spin_lock_bh(&bufqi->bufq_lock);
1984 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001985 typeof(*cache_buf), list);
1986 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001987 bufqi->qdepth_no_thresh = bufqi->curr;
1988 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301989 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001990}
1991
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07001992static void ol_txrx_flush_cache_rx_queue(void)
Poddar, Siddartha78cac32016-12-29 20:08:34 +05301993{
1994 uint8_t sta_id;
1995 struct ol_txrx_peer_t *peer;
1996 struct ol_txrx_pdev_t *pdev;
1997
1998 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
1999 if (!pdev)
2000 return;
2001
2002 for (sta_id = 0; sta_id < WLAN_MAX_STA_COUNT; sta_id++) {
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002003 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
2004 sta_id);
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302005 if (!peer)
2006 continue;
2007 ol_txrx_flush_rx_frames(peer, 1);
2008 }
2009}
2010
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302011/* Define short name to use in cds_trigger_recovery */
2012#define PEER_DEL_TIMEOUT QDF_PEER_DELETION_TIMEDOUT
2013
Dhanashri Atre12a08392016-02-17 13:10:34 -08002014/**
Naveen Rawat17c42a82018-02-01 19:18:27 -08002015 * ol_txrx_dump_peer_access_list() - dump peer access list
2016 * @peer: peer handle
2017 *
2018 * This function dumps any peer debug ids that still hold references to the peer
2019 *
2020 * Return: None
2021 */
2022static void ol_txrx_dump_peer_access_list(ol_txrx_peer_handle peer)
2023{
2024 u32 i;
2025 u32 pending_ref;
2026
2027 for (i = 0; i < PEER_DEBUG_ID_MAX; i++) {
2028 pending_ref = qdf_atomic_read(&peer->access_list[i]);
2029 if (pending_ref)
2030 ol_txrx_info_high("id %d pending refs %d",
2031 i, pending_ref);
2032 }
2033}
2034
2035/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08002036 * ol_txrx_peer_attach - Allocate and set up references for a
2037 * data peer object.
2038 * @data_pdev: data physical device object that will indirectly
2039 * own the data_peer object
2040 * @data_vdev - data virtual device object that will directly
2041 * own the data_peer object
2042 * @peer_mac_addr - MAC address of the new peer
2043 *
2044 * When an association with a peer starts, the host's control SW
2045 * uses this function to inform the host data SW.
2046 * The host data SW allocates its own peer object, and stores a
2047 * reference to the control peer object within the data peer object.
2048 * The host data SW also stores a reference to the virtual device
2049 * that the peer is associated with. This virtual device handle is
2050 * used when the data SW delivers rx data frames to the OS shim layer.
2051 * The host data SW returns a handle to the new peer data object,
2052 * so a reference within the control peer object can be set to the
2053 * data peer object.
2054 *
2055 * Return: handle to new data peer object, or NULL if the attach
2056 * fails
2057 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002058static void *
psimha8696f772018-04-03 17:38:38 -07002059ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr,
Sravan Kumar Kairamc273afd2018-05-28 12:12:28 +05302060 struct cdp_ctrl_objmgr_peer *ctrl_peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002061{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002062 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002063 struct ol_txrx_peer_t *peer;
2064 struct ol_txrx_peer_t *temp_peer;
2065 uint8_t i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002066 bool wait_on_deletion = false;
2067 unsigned long rc;
Dhanashri Atre12a08392016-02-17 13:10:34 -08002068 struct ol_txrx_pdev_t *pdev;
Abhishek Singh217d9782017-04-28 23:49:11 +05302069 bool cmp_wait_mac = false;
2070 uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
Alok Kumare1977442018-11-28 17:16:03 +05302071 u8 check_valid = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002072
2073 /* preconditions */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002074 TXRX_ASSERT2(vdev);
2075 TXRX_ASSERT2(peer_mac_addr);
2076
Dhanashri Atre12a08392016-02-17 13:10:34 -08002077 pdev = vdev->pdev;
2078 TXRX_ASSERT2(pdev);
2079
Alok Kumare1977442018-11-28 17:16:03 +05302080 if (pdev->enable_peer_unmap_conf_support)
2081 check_valid = 1;
2082
Abhishek Singh217d9782017-04-28 23:49:11 +05302083 if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
2084 QDF_MAC_ADDR_SIZE))
2085 cmp_wait_mac = true;
2086
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302087 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002088 /* check for duplicate existing peer */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002089 TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2090 if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
Alok Kumare1977442018-11-28 17:16:03 +05302091 (union ol_txrx_align_mac_addr_t *)peer_mac_addr) &&
2092 (check_valid == 0 || temp_peer->valid)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302093 ol_txrx_info_high(
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002094 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exists.\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002095 vdev->vdev_id,
2096 peer_mac_addr[0], peer_mac_addr[1],
2097 peer_mac_addr[2], peer_mac_addr[3],
2098 peer_mac_addr[4], peer_mac_addr[5]);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302099 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002100 vdev->wait_on_peer_id = temp_peer->local_id;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002101 qdf_event_reset(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002102 wait_on_deletion = true;
Abhishek Singh217d9782017-04-28 23:49:11 +05302103 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002104 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302105 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002106 return NULL;
2107 }
2108 }
Abhishek Singh217d9782017-04-28 23:49:11 +05302109 if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
2110 &temp_peer->mac_addr,
Alok Kumare1977442018-11-28 17:16:03 +05302111 &vdev->last_peer_mac_addr) &&
2112 (check_valid == 0 ||
2113 temp_peer->valid)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302114 ol_txrx_info_high(
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002115 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) old peer exists.\n",
Abhishek Singh217d9782017-04-28 23:49:11 +05302116 vdev->vdev_id,
2117 vdev->last_peer_mac_addr.raw[0],
2118 vdev->last_peer_mac_addr.raw[1],
2119 vdev->last_peer_mac_addr.raw[2],
2120 vdev->last_peer_mac_addr.raw[3],
2121 vdev->last_peer_mac_addr.raw[4],
2122 vdev->last_peer_mac_addr.raw[5]);
2123 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2124 vdev->wait_on_peer_id = temp_peer->local_id;
2125 qdf_event_reset(&vdev->wait_delete_comp);
2126 wait_on_deletion = true;
2127 break;
2128 } else {
2129 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2130 ol_txrx_err("peer not found");
2131 return NULL;
2132 }
2133 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002134 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302135 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002136
Abhishek Singh217d9782017-04-28 23:49:11 +05302137 qdf_mem_zero(&vdev->last_peer_mac_addr,
2138 sizeof(union ol_txrx_align_mac_addr_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002139 if (wait_on_deletion) {
2140 /* wait for peer deletion */
Nachiket Kukade0396b732017-11-14 16:35:16 +05302141 rc = qdf_wait_for_event_completion(&vdev->wait_delete_comp,
Prakash Manjunathappad3ccca22016-05-05 19:23:19 -07002142 PEER_DELETION_TIMEOUT);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002143 if (QDF_STATUS_SUCCESS != rc) {
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002144 ol_txrx_err("error waiting for peer_id(%d) deletion, status %d\n",
Dustin Brown100201e2017-07-10 11:48:40 -07002145 vdev->wait_on_peer_id, (int) rc);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002146 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08002147 ol_txrx_dump_peer_access_list(temp_peer);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002148 wlan_roam_debug_dump_table();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002149 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Dustin Brown100201e2017-07-10 11:48:40 -07002150
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002151 return NULL;
2152 }
2153 }
2154
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302155 peer = qdf_mem_malloc(sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002156 if (!peer)
2157 return NULL; /* failure */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002158
2159 /* store provided params */
2160 peer->vdev = vdev;
Sravan Kumar Kairamc273afd2018-05-28 12:12:28 +05302161	peer->ctrl_peer = ctrl_peer;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302162 qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002163 OL_TXRX_MAC_ADDR_LEN);
2164
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302165 ol_txrx_peer_txqs_init(pdev, peer);
2166
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002167 INIT_LIST_HEAD(&peer->bufq_info.cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302168 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002169 /* add this peer into the vdev's list */
2170 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302171 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002172 /* check whether this is a real peer (peer mac addr != vdev mac addr) */
Frank Liu4362e462018-01-16 11:51:55 +08002173 if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr)) {
2174 qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002175 vdev->last_real_peer = peer;
Frank Liu4362e462018-01-16 11:51:55 +08002176 qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
2177 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002178
2179 peer->rx_opt_proc = pdev->rx_opt_proc;
2180
2181 ol_rx_peer_init(pdev, peer);
2182
2183 /* initialize the peer_id */
2184 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2185 peer->peer_ids[i] = HTT_INVALID_PEER;
2186
Alok Kumare1977442018-11-28 17:16:03 +05302187 if (pdev->enable_peer_unmap_conf_support)
2188 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2189 peer->map_unmap_peer_ids[i] = HTT_INVALID_PEER;
2190
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302191 qdf_spinlock_create(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002192 qdf_spinlock_create(&peer->bufq_info.bufq_lock);
2193
2194 peer->bufq_info.thresh = OL_TXRX_CACHED_BUFQ_THRESH;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002195
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302196 qdf_atomic_init(&peer->delete_in_progress);
2197 qdf_atomic_init(&peer->flush_in_progress);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302198 qdf_atomic_init(&peer->ref_cnt);
Mohit Khannab7bec722017-11-10 11:43:44 -08002199
2200 for (i = 0; i < PEER_DEBUG_ID_MAX; i++)
2201 qdf_atomic_init(&peer->access_list[i]);
2202
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002203 /* keep one reference for attach */
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002204 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002205
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002206 /* Set a flag to indicate peer create is pending in firmware */
Prakash Dhavali0d3f1d62016-11-20 23:48:24 -08002207 qdf_atomic_init(&peer->fw_create_pending);
2208 qdf_atomic_set(&peer->fw_create_pending, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002209
2210 peer->valid = 1;
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07002211 qdf_timer_init(pdev->osdev, &peer->peer_unmap_timer,
2212 peer_unmap_timer_handler, peer, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002213
2214 ol_txrx_peer_find_hash_add(pdev, peer);
2215
Mohit Khanna47384bc2016-08-15 15:37:05 -07002216 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002217 "vdev %pK created peer %pK ref_cnt %d (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Mohit Khanna47384bc2016-08-15 15:37:05 -07002218 vdev, peer, qdf_atomic_read(&peer->ref_cnt),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002219 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2220 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2221 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2222 /*
2223	 * For every peer MAP message, search and set if bss_peer
2224 */
Ankit Guptaa5076012016-09-14 11:32:19 -07002225 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
2226 OL_TXRX_MAC_ADDR_LEN))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002227 peer->bss_peer = 1;
2228
2229 /*
2230 * The peer starts in the "disc" state while association is in progress.
2231 * Once association completes, the peer will get updated to "auth" state
2232 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2233 * or else to the "conn" state. For non-open mode, the peer will
2234 * progress to "auth" state once the authentication completes.
2235 */
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002236 peer->state = OL_TXRX_PEER_STATE_INVALID;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002237 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002238 OL_TXRX_PEER_STATE_DISC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002239
2240#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2241 peer->rssi_dbm = HTT_RSSI_INVALID;
2242#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002243 if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2244 !pdev->self_peer) {
2245 pdev->self_peer = peer;
2246 /*
2247 * No Tx in monitor mode, otherwise results in target assert.
2248 * Setting disable_intrabss_fwd to true
2249 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002250 ol_vdev_rx_set_intrabss_fwd((struct cdp_vdev *)vdev, true);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002251 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002252
2253 ol_txrx_local_peer_id_alloc(pdev, peer);
2254
Leo Chang98726762016-10-28 11:07:18 -07002255 return (void *)peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002256}
2257
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302258#undef PEER_DEL_TIMEOUT
2259
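/*
 * Illustrative call flow for peer creation (sketch only; peer_mac and
 * ctrl_peer come from the control path, and the real driver reaches these
 * entry points through the cdp peer ops rather than by direct calls):
 *
 *   void *peer = ol_txrx_peer_attach(vdev, peer_mac, ctrl_peer);
 *   if (!peer)
 *           return;   // duplicate peer or allocation failure
 *   // later, once association/authentication completes:
 *   ol_txrx_peer_state_update(pdev, peer_mac, OL_TXRX_PEER_STATE_AUTH);
 */
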
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002260/*
2261 * Discarding tx filter - removes all data frames (disconnected state)
2262 */
2263static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2264{
2265 return A_ERROR;
2266}
2267
2268/*
2269 * Non-authentication tx filter - filters out data frames that are not
2270 * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2271 * data frames (connected state)
2272 */
2273static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2274{
2275 return
2276 (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2277 tx_msdu_info->htt.info.ethertype ==
2278 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2279}
2280
2281/*
2282 * Pass-through tx filter - lets all data frames through (authenticated state)
2283 */
2284static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2285{
2286 return A_OK;
2287}
2288
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002289/**
2290 * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2291 * @peer: handle to peer
2292 *
2293 * returns mac addrs for module which do not know peer type
2294 *
2295 * Return: the mac_addr from peer
2296 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002297static uint8_t *
Leo Chang98726762016-10-28 11:07:18 -07002298ol_txrx_peer_get_peer_mac_addr(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002299{
Leo Chang98726762016-10-28 11:07:18 -07002300 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002301
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002302 if (!peer)
2303 return NULL;
2304
2305 return peer->mac_addr.raw;
2306}
2307
Abhishek Singhcfb44482017-03-10 12:42:37 +05302308#ifdef WLAN_FEATURE_11W
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002309/**
2310 * ol_txrx_get_pn_info() - Returns pn info from peer
2311 * @peer: handle to peer
2312 * @last_pn_valid: return last_rmf_pn_valid value from peer.
2313 * @last_pn: return last_rmf_pn value from peer.
2314 * @rmf_pn_replays: return rmf_pn_replays value from peer.
2315 *
2316 * Return: NONE
2317 */
2318void
Leo Chang98726762016-10-28 11:07:18 -07002319ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002320 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2321{
Leo Chang98726762016-10-28 11:07:18 -07002322 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002323 *last_pn_valid = &peer->last_rmf_pn_valid;
2324 *last_pn = &peer->last_rmf_pn;
2325 *rmf_pn_replays = &peer->rmf_pn_replays;
2326}
Abhishek Singhcfb44482017-03-10 12:42:37 +05302327#else
2328void
2329ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
2330 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2331{
2332}
2333#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002334
2335/**
2336 * ol_txrx_get_opmode() - Return operation mode of vdev
2337 * @vdev: vdev handle
2338 *
2339 * Return: operation mode.
2340 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002341static int ol_txrx_get_opmode(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002342{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002343 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002344
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002345 return vdev->opmode;
2346}
2347
2348/**
2349 * ol_txrx_get_peer_state() - Return peer state of peer
2350 * @peer: peer handle
2351 *
2352 * Return: return peer state
2353 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002354static int ol_txrx_get_peer_state(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002355{
Leo Chang98726762016-10-28 11:07:18 -07002356 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002357
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002358 return peer->state;
2359}
2360
2361/**
2362 * ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
2363 * @peer: peer handle
2364 *
2365 * Return: vdev handle from peer
2366 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002367static struct cdp_vdev *ol_txrx_get_vdev_for_peer(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002368{
Leo Chang98726762016-10-28 11:07:18 -07002369 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002370
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002371 return (struct cdp_vdev *)peer->vdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002372}
2373
2374/**
2375 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
2376 * @vdev: vdev handle
2377 *
2378 * Return: vdev mac address
2379 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002380static uint8_t *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002381ol_txrx_get_vdev_mac_addr(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002382{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002383 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002384
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002385 if (!vdev)
2386 return NULL;
2387
2388 return vdev->mac_addr.raw;
2389}
2390
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002391#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002392/**
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002393 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002394 * vdev
2395 * @vdev: vdev handle
2396 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002397 * Return: Handle to struct qdf_mac_addr
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002398 */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002399struct qdf_mac_addr *
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002400ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
2401{
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002402 return (struct qdf_mac_addr *)&(vdev->mac_addr);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002403}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002404#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002405
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002406#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002407/**
2408 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
2409 * @vdev: vdev handle
2410 *
2411 * Return: Handle to pdev
2412 */
2413ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
2414{
2415 return vdev->pdev;
2416}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002417#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002418
2419/**
2420 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
2421 * @vdev: vdev handle
2422 *
2423 * Return: Handle to control pdev
2424 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002425static struct cdp_cfg *
2426ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002427{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002428 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002429
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002430 return vdev->pdev->ctrl_pdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002431}
2432
2433/**
2434 * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
2435 * @vdev: vdev handle
2436 *
2437 * Return: Rx Fwd disabled status
2438 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002439static uint8_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002440ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002441{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002442 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002443 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
2444 vdev->pdev->ctrl_pdev;
2445 return cfg->rx_fwd_disabled;
2446}
2447
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002448#ifdef QCA_IBSS_SUPPORT
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002449/**
2450 * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
2451 * @vdev: vdev handle
 2452 * @peer_num_delta: number of peers to add (negative to remove)
 2453 *
 2454 * Return: OL_TXRX_INVALID_NUM_PEERS on failure, or the total peer count after adjustment.
2455 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002456static int16_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002457ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002458 int16_t peer_num_delta)
2459{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002460 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002461 int16_t new_peer_num;
2462
2463 new_peer_num = vdev->ibss_peer_num + peer_num_delta;
Naveen Rawatc45d1622016-07-05 12:20:09 -07002464 if (new_peer_num > MAX_PEERS || new_peer_num < 0)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002465 return OL_TXRX_INVALID_NUM_PEERS;
2466
2467 vdev->ibss_peer_num = new_peer_num;
2468
2469 return new_peer_num;
2470}
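
/*
 * Illustrative usage sketch (hypothetical caller, not part of the driver):
 * peer_num_delta may be negative when an IBSS peer leaves, and the caller
 * must treat OL_TXRX_INVALID_NUM_PEERS as a failure to adjust the count.
 *
 *	int16_t peers;
 *
 *	peers = ol_txrx_update_ibss_add_peer_num_of_vdev(pvdev, 1);
 *	if (peers == OL_TXRX_INVALID_NUM_PEERS)
 *		return QDF_STATUS_E_FAILURE;
 */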
2471
2472/**
2473 * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
2474 * beat timer
2475 * @vdev: vdev handle
2476 * @timer_value_sec: new heart beat timer value
2477 *
2478 * Return: Old timer value set in vdev.
2479 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002480static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
2481 uint16_t timer_value_sec)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002482{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002483 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002484 uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
2485
2486 vdev->ibss_peer_heart_beat_timer = timer_value_sec;
2487
2488 return old_timer_value;
2489}
jiad391c5282018-11-26 16:21:04 +08002490#else /* !QCA_IBSS_SUPPORT */
2491static inline int16_t
2492ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
2493 int16_t peer_num_delta)
2494{
2495 return 0;
2496}
2497
2498static inline uint16_t
2499ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
2500 uint16_t timer_value_sec)
2501{
2502 return 0;
2503}
2504#endif /* QCA_IBSS_SUPPORT */
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002505
2506/**
2507 * ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
2508 * @vdev: vdev handle
2509 * @callback: callback function to remove the peer.
2510 * @callback_context: handle for callback function
 2511 * @remove_last_peer: whether the last (self/bss) peer should also be removed
2512 *
2513 * Return: NONE
2514 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002515static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002516ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002517 ol_txrx_vdev_peer_remove_cb callback,
2518 void *callback_context, bool remove_last_peer)
2519{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002520 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002521 ol_txrx_peer_handle peer, temp;
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07002522 int self_removed = 0;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002523 /* remove all remote peers for vdev */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002524 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002525
2526 temp = NULL;
2527 TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
2528 peer_list_elem) {
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05302529 if (qdf_atomic_read(&peer->delete_in_progress))
2530 continue;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002531 if (temp) {
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002532 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05302533 callback(callback_context, temp->mac_addr.raw,
Jiachao Wu641760e2018-01-21 12:11:31 +08002534 vdev->vdev_id, temp);
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002535 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002536 }
2537 /* self peer is deleted last */
2538 if (peer == TAILQ_FIRST(&vdev->peer_list)) {
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07002539 self_removed = 1;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002540 break;
Yun Parkeaea8632017-04-09 09:53:45 -07002541 }
2542 temp = peer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002543 }
2544
Mohit Khanna137b97d2016-04-21 16:11:33 -07002545 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
2546
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07002547 if (self_removed)
Nirav Shah7c8c1712018-09-10 16:01:31 +05302548 ol_txrx_info("self peer removed by caller");
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07002549
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002550 if (remove_last_peer) {
2551 /* remove IBSS bss peer last */
2552 peer = TAILQ_FIRST(&vdev->peer_list);
2553 callback(callback_context, (uint8_t *) &vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08002554 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002555 }
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002556}
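
/*
 * Illustrative callback sketch (hypothetical, not part of the driver):
 * judging from the invocations above, the remove callback receives the
 * caller's context, the peer MAC address, the vdev id and the peer handle;
 * the authoritative ol_txrx_vdev_peer_remove_cb typedef lives in the
 * cdp/ol_txrx headers.
 *
 *	static void my_peer_remove_cb(void *ctx, uint8_t *peer_mac,
 *				      uint8_t vdev_id, void *peer)
 *	{
 *		// issue the control-path delete for peer_mac on vdev_id
 *	}
 *
 *	ol_txrx_remove_peers_for_vdev(pvdev, my_peer_remove_cb, my_ctx, true);
 */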
2557
2558/**
2559 * ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
2560 * @vdev: vdev handle
2561 * @callback: callback function to remove the peer.
2562 * @callback_context: handle for callback function
2563 *
2564 * Return: NONE
2565 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002566static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002567ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002568 ol_txrx_vdev_peer_remove_cb callback,
2569 void *callback_context)
2570{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002571 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002572 ol_txrx_peer_handle peer = NULL;
Jiachao Wu641760e2018-01-21 12:11:31 +08002573 ol_txrx_peer_handle tmp_peer = NULL;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002574
Jiachao Wu641760e2018-01-21 12:11:31 +08002575 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, peer_list_elem, tmp_peer) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302576 ol_txrx_info_high(
Nirav Shah7c8c1712018-09-10 16:01:31 +05302577 "peer found for vdev id %d. deleting the peer",
2578 vdev->vdev_id);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002579 callback(callback_context, (uint8_t *)&vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08002580 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002581 }
2582}
2583
Nirav Shah575282c2018-07-08 22:48:00 +05302584#ifdef WLAN_FEATURE_DSRC
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002585/**
2586 * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
2587 * @vdev: vdev handle
2588 * @ocb_set_chan: OCB channel information to be set in vdev.
2589 *
2590 * Return: NONE
2591 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002592static void ol_txrx_set_ocb_chan_info(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002593 struct ol_txrx_ocb_set_chan ocb_set_chan)
2594{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002595 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002596
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002597 vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
2598 vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
2599}
2600
2601/**
2602 * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
2603 * @vdev: vdev handle
2604 *
2605 * Return: handle to struct ol_txrx_ocb_chan_info
2606 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002607static struct ol_txrx_ocb_chan_info *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002608ol_txrx_get_ocb_chan_info(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002609{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002610 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002611
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002612 return vdev->ocb_channel_info;
2613}
Nirav Shah575282c2018-07-08 22:48:00 +05302614#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002615
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002616/**
2617 * @brief specify the peer's authentication state
2618 * @details
2619 * Specify the peer's authentication state (none, connected, authenticated)
2620 * to allow the data SW to determine whether to filter out invalid data frames.
2621 * (In the "connected" state, where security is enabled, but authentication
2622 * has not completed, tx and rx data frames other than EAPOL or WAPI should
2623 * be discarded.)
2624 * This function is only relevant for systems in which the tx and rx filtering
2625 * are done in the host rather than in the target.
2626 *
2627 * @param data_peer - which peer has changed its state
2628 * @param state - the new state of the peer
2629 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002630 * Return: QDF Status
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002631 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002632QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002633 uint8_t *peer_mac,
2634 enum ol_txrx_peer_state state)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002635{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002636 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002637 struct ol_txrx_peer_t *peer;
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002638 int peer_ref_cnt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002639
Anurag Chouhanc5548422016-02-24 18:33:27 +05302640 if (qdf_unlikely(!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302641 ol_txrx_err("Pdev is NULL");
Anurag Chouhanc5548422016-02-24 18:33:27 +05302642 qdf_assert(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302643 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002644 }
2645
Mohit Khannab7bec722017-11-10 11:43:44 -08002646 peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
2647 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002648 if (NULL == peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302649 ol_txrx_err(
Nirav Shah7c8c1712018-09-10 16:01:31 +05302650 "peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302651 peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
2652 peer_mac[4], peer_mac[5]);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302653 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002654 }
2655
2656 /* TODO: Should we send WMI command of the connection state? */
 2657	/* avoid a redundant auth state change */
2658 if (peer->state == state) {
2659#ifdef TXRX_PRINT_VERBOSE_ENABLE
Nirav Shah7c8c1712018-09-10 16:01:31 +05302660 ol_txrx_dbg("no state change, returns directly");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002661#endif
Mohit Khannab7bec722017-11-10 11:43:44 -08002662 peer_ref_cnt = ol_txrx_peer_release_ref
2663 (peer,
2664 PEER_DEBUG_ID_OL_INTERNAL);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302665 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002666 }
2667
Nirav Shah7c8c1712018-09-10 16:01:31 +05302668 ol_txrx_dbg("change from %d to %d",
2669 peer->state, state);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002670
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002671 peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002672 ? ol_tx_filter_pass_thru
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002673 : ((state == OL_TXRX_PEER_STATE_CONN)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002674 ? ol_tx_filter_non_auth
2675 : ol_tx_filter_discard);
2676
2677 if (peer->vdev->pdev->cfg.host_addba) {
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002678 if (state == OL_TXRX_PEER_STATE_AUTH) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002679 int tid;
2680 /*
2681 * Pause all regular (non-extended) TID tx queues until
2682 * data arrives and ADDBA negotiation has completed.
2683 */
Nirav Shah7c8c1712018-09-10 16:01:31 +05302684 ol_txrx_dbg("pause peer and unpause mgmt/non-qos");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002685 ol_txrx_peer_pause(peer); /* pause all tx queues */
2686 /* unpause mgmt and non-QoS tx queues */
2687 for (tid = OL_TX_NUM_QOS_TIDS;
2688 tid < OL_TX_NUM_TIDS; tid++)
2689 ol_txrx_peer_tid_unpause(peer, tid);
2690 }
2691 }
Mohit Khannab7bec722017-11-10 11:43:44 -08002692 peer_ref_cnt = ol_txrx_peer_release_ref(peer,
2693 PEER_DEBUG_ID_OL_INTERNAL);
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002694 /*
Mohit Khannab7bec722017-11-10 11:43:44 -08002695 * after ol_txrx_peer_release_ref, peer object cannot be accessed
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002696 * if the return code was 0
2697 */
Mohit Khannab04dfcd2017-02-13 18:54:35 -08002698 if (peer_ref_cnt > 0)
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002699 /*
 2700		 * Set the state after the pause to avoid the race condition
2701 * with ADDBA check in tx path
2702 */
2703 peer->state = state;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302704 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002705}
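
/*
 * Illustrative usage sketch (hypothetical control-path caller): a station
 * peer is moved to the authenticated state once key exchange completes,
 * which switches its tx filter to ol_tx_filter_pass_thru.
 *
 *	if (ol_txrx_peer_state_update(ppdev, sta_mac,
 *				      OL_TXRX_PEER_STATE_AUTH) !=
 *	    QDF_STATUS_SUCCESS)
 *		ol_txrx_err("failed to mark peer authenticated");
 */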
2706
2707void
2708ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
2709{
2710 peer->keyinstalled = val;
2711}
2712
2713void
2714ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
2715 uint8_t *peer_mac,
2716 union ol_txrx_peer_update_param_t *param,
2717 enum ol_txrx_peer_update_select_t select)
2718{
2719 struct ol_txrx_peer_t *peer;
2720
Mohit Khannab7bec722017-11-10 11:43:44 -08002721 peer = ol_txrx_peer_find_hash_find_get_ref(vdev->pdev, peer_mac, 0, 1,
2722 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002723 if (!peer) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05302724 ol_txrx_dbg("peer is null");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002725 return;
2726 }
2727
2728 switch (select) {
2729 case ol_txrx_peer_update_qos_capable:
2730 {
 2731		/* save qos_capable in the txrx peer here, and save it again
 2732		 * when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO arrives.
2733 */
2734 peer->qos_capable = param->qos_capable;
2735 /*
2736 * The following function call assumes that the peer has a
2737 * single ID. This is currently true, and
2738 * is expected to remain true.
2739 */
2740 htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
2741 peer->peer_ids[0],
2742 peer->qos_capable);
2743 break;
2744 }
2745 case ol_txrx_peer_update_uapsdMask:
2746 {
2747 peer->uapsd_mask = param->uapsd_mask;
2748 htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
2749 peer->peer_ids[0],
2750 peer->uapsd_mask);
2751 break;
2752 }
2753 case ol_txrx_peer_update_peer_security:
2754 {
2755 enum ol_sec_type sec_type = param->sec_type;
2756 enum htt_sec_type peer_sec_type = htt_sec_type_none;
2757
2758 switch (sec_type) {
2759 case ol_sec_type_none:
2760 peer_sec_type = htt_sec_type_none;
2761 break;
2762 case ol_sec_type_wep128:
2763 peer_sec_type = htt_sec_type_wep128;
2764 break;
2765 case ol_sec_type_wep104:
2766 peer_sec_type = htt_sec_type_wep104;
2767 break;
2768 case ol_sec_type_wep40:
2769 peer_sec_type = htt_sec_type_wep40;
2770 break;
2771 case ol_sec_type_tkip:
2772 peer_sec_type = htt_sec_type_tkip;
2773 break;
2774 case ol_sec_type_tkip_nomic:
2775 peer_sec_type = htt_sec_type_tkip_nomic;
2776 break;
2777 case ol_sec_type_aes_ccmp:
2778 peer_sec_type = htt_sec_type_aes_ccmp;
2779 break;
2780 case ol_sec_type_wapi:
2781 peer_sec_type = htt_sec_type_wapi;
2782 break;
2783 default:
2784 peer_sec_type = htt_sec_type_none;
2785 break;
2786 }
2787
2788 peer->security[txrx_sec_ucast].sec_type =
2789 peer->security[txrx_sec_mcast].sec_type =
2790 peer_sec_type;
2791
2792 break;
2793 }
2794 default:
2795 {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05302796 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002797 "ERROR: unknown param %d in %s", select,
2798 __func__);
2799 break;
2800 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002801 } /* switch */
Mohit Khannab7bec722017-11-10 11:43:44 -08002802 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002803}
2804
2805uint8_t
2806ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
2807{
2808
2809 struct ol_txrx_peer_t *peer;
Yun Parkeaea8632017-04-09 09:53:45 -07002810
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002811 peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
2812 if (peer)
2813 return peer->uapsd_mask;
2814 return 0;
2815}
2816
2817uint8_t
2818ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
2819{
2820
2821 struct ol_txrx_peer_t *peer_t =
2822 ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
2823 if (peer_t != NULL)
2824 return peer_t->qos_capable;
2825 return 0;
2826}
2827
Mohit Khannab7bec722017-11-10 11:43:44 -08002828/**
Alok Kumare1977442018-11-28 17:16:03 +05302829 * ol_txrx_send_peer_unmap_conf() - send peer unmap conf cmd to FW
2830 * @pdev: pdev_handle
2831 * @peer: peer_handle
2832 *
2833 * Return: None
2834 */
2835static inline void
2836ol_txrx_send_peer_unmap_conf(ol_txrx_pdev_handle pdev,
2837 ol_txrx_peer_handle peer)
2838{
2839 int i;
2840 int peer_cnt = 0;
2841 uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
2842 QDF_STATUS status = QDF_STATUS_E_FAILURE;
2843
2844 qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
2845
2846 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER &&
2847 peer_cnt < MAX_NUM_PEER_ID_PER_PEER; i++) {
2848 if (peer->map_unmap_peer_ids[i] == HTT_INVALID_PEER)
2849 continue;
2850 peer_ids[peer_cnt++] = peer->map_unmap_peer_ids[i];
2851 peer->map_unmap_peer_ids[i] = HTT_INVALID_PEER;
2852 }
2853
2854 qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
2855
2856 if (peer->peer_unmap_sync_cb && peer_cnt) {
2857 ol_txrx_dbg("send unmap conf cmd [%d]", peer_cnt);
2858 status = peer->peer_unmap_sync_cb(
2859 DEBUG_INVALID_VDEV_ID,
2860 peer_cnt, peer_ids);
2861 if (status != QDF_STATUS_SUCCESS)
2862 ol_txrx_err("unable to send unmap conf cmd [%d]",
2863 peer_cnt);
2864 }
2865}
2866
2867/**
Mohit Khannab7bec722017-11-10 11:43:44 -08002868 * ol_txrx_peer_free_tids() - free tids for the peer
2869 * @peer: peer handle
2870 *
2871 * Return: None
2872 */
2873static inline void ol_txrx_peer_free_tids(ol_txrx_peer_handle peer)
2874{
2875 int i = 0;
2876 /*
2877 * 'array' is allocated in addba handler and is supposed to be
2878 * freed in delba handler. There is the case (for example, in
2879 * SSR) where delba handler is not called. Because array points
2880 * to address of 'base' by default and is reallocated in addba
2881 * handler later, only free the memory when the array does not
2882 * point to base.
2883 */
2884 for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
2885 if (peer->tids_rx_reorder[i].array !=
2886 &peer->tids_rx_reorder[i].base) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05302887 ol_txrx_dbg("delete reorder arr, tid:%d", i);
Mohit Khannab7bec722017-11-10 11:43:44 -08002888 qdf_mem_free(peer->tids_rx_reorder[i].array);
2889 ol_rx_reorder_init(&peer->tids_rx_reorder[i],
2890 (uint8_t)i);
2891 }
2892 }
2893}
2894
2895/**
2896 * ol_txrx_peer_release_ref() - release peer reference
 2897 * @peer: peer handle
 * @debug_id: debug id of the caller's reference
2898 *
2899 * Release peer reference and delete peer if refcount is 0
2900 *
wadesong9f2b1102017-12-20 22:58:35 +08002901 * Return: Resulting peer ref_cnt after this function is invoked
Mohit Khannab7bec722017-11-10 11:43:44 -08002902 */
2903int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
2904 enum peer_debug_id_type debug_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002905{
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002906 int rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002907 struct ol_txrx_vdev_t *vdev;
2908 struct ol_txrx_pdev_t *pdev;
Jingxiang Ge3badb982018-01-02 17:39:01 +08002909 bool ref_silent = false;
Jingxiang Ge190679b2018-01-30 08:56:19 +08002910 int access_list = 0;
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002911 uint32_t err_code = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002912
2913 /* preconditions */
2914 TXRX_ASSERT2(peer);
2915
2916 vdev = peer->vdev;
2917 if (NULL == vdev) {
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002918 ol_txrx_err("The vdev is not present anymore\n");
Amar Singhal7ef59092018-09-11 15:32:35 -07002919 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002920 }
2921
2922 pdev = vdev->pdev;
2923 if (NULL == pdev) {
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002924 ol_txrx_err("The pdev is not present anymore\n");
2925 err_code = 0xbad2;
2926 goto ERR_STATE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002927 }
2928
Mohit Khannab7bec722017-11-10 11:43:44 -08002929 if (debug_id >= PEER_DEBUG_ID_MAX || debug_id < 0) {
2930 ol_txrx_err("incorrect debug_id %d ", debug_id);
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002931 err_code = 0xbad3;
2932 goto ERR_STATE;
Mohit Khannab7bec722017-11-10 11:43:44 -08002933 }
2934
Jingxiang Ge3badb982018-01-02 17:39:01 +08002935 if (debug_id == PEER_DEBUG_ID_OL_RX_THREAD)
2936 ref_silent = true;
2937
2938 if (!ref_silent)
2939 wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
2940 DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002941 peer, 0xdead,
Jingxiang Ge3badb982018-01-02 17:39:01 +08002942 qdf_atomic_read(&peer->ref_cnt));
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002943
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002944
2945 /*
2946 * Hold the lock all the way from checking if the peer ref count
2947 * is zero until the peer references are removed from the hash
2948 * table and vdev list (if the peer ref count is zero).
2949 * This protects against a new HL tx operation starting to use the
2950 * peer object just after this function concludes it's done being used.
2951 * Furthermore, the lock needs to be held while checking whether the
2952 * vdev's list of peers is empty, to make sure that list is not modified
2953 * concurrently with the empty check.
2954 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302955 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07002956
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002957 /*
2958 * Check for the reference count before deleting the peer
2959 * as we noticed that sometimes we are re-entering this
2960 * function again which is leading to dead-lock.
2961 * (A double-free should never happen, so assert if it does.)
2962 */
2963 rc = qdf_atomic_read(&(peer->ref_cnt));
2964
2965 if (rc == 0) {
2966 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2967 ol_txrx_err("The Peer is not present anymore\n");
2968 qdf_assert(0);
2969 return -EACCES;
2970 }
2971 /*
2972 * now decrement rc; this will be the return code.
2973 * 0 : peer deleted
2974 * >0: peer ref removed, but still has other references
2975 * <0: sanity failed - no changes to the state of the peer
2976 */
2977 rc--;
2978
Mohit Khannab7bec722017-11-10 11:43:44 -08002979 if (!qdf_atomic_read(&peer->access_list[debug_id])) {
2980 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
jitiphil8ad8a6f2018-03-01 23:45:05 +05302981 ol_txrx_err("peer %pK ref was not taken by %d",
Mohit Khannab7bec722017-11-10 11:43:44 -08002982 peer, debug_id);
2983 ol_txrx_dump_peer_access_list(peer);
2984 QDF_BUG(0);
2985 return -EACCES;
2986 }
Mohit Khannab7bec722017-11-10 11:43:44 -08002987 qdf_atomic_dec(&peer->access_list[debug_id]);
2988
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07002989 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
Mohit Khannab7bec722017-11-10 11:43:44 -08002990 u16 peer_id;
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002991 wlan_roam_debug_log(vdev->vdev_id,
2992 DEBUG_DELETING_PEER_OBJ,
2993 DEBUG_INVALID_PEER_ID,
2994 &peer->mac_addr.raw, peer, 0,
2995 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002996 peer_id = peer->local_id;
2997 /* remove the reference to the peer from the hash table */
2998 ol_txrx_peer_find_hash_remove(pdev, peer);
2999
3000 /* remove the peer from its parent vdev's list */
3001 TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
3002
3003 /* cleanup the Rx reorder queues for this peer */
3004 ol_rx_peer_cleanup(vdev, peer);
3005
Jingxiang Ge3badb982018-01-02 17:39:01 +08003006 qdf_spinlock_destroy(&peer->peer_info_lock);
3007 qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
3008
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003009 /* peer is removed from peer_list */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303010 qdf_atomic_set(&peer->delete_in_progress, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003011
3012 /*
3013 * Set wait_delete_comp event if the current peer id matches
3014 * with registered peer id.
3015 */
3016 if (peer_id == vdev->wait_on_peer_id) {
Anurag Chouhance0dc992016-02-16 18:18:03 +05303017 qdf_event_set(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003018 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3019 }
3020
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07003021 qdf_timer_sync_cancel(&peer->peer_unmap_timer);
3022 qdf_timer_free(&peer->peer_unmap_timer);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003023
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003024 /* check whether the parent vdev has no peers left */
3025 if (TAILQ_EMPTY(&vdev->peer_list)) {
3026 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003027 * Check if the parent vdev was waiting for its peers
3028 * to be deleted, in order for it to be deleted too.
3029 */
3030 if (vdev->delete.pending) {
3031 ol_txrx_vdev_delete_cb vdev_delete_cb =
3032 vdev->delete.callback;
3033 void *vdev_delete_context =
3034 vdev->delete.context;
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303035 /*
3036 * Now that there are no references to the peer,
3037 * we can release the peer reference lock.
3038 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303039 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303040
gbian016a42e2017-03-01 18:49:11 +08003041 /*
Yun Parkeaea8632017-04-09 09:53:45 -07003042				 * ol_tx_desc_free might access invalid content
3043				 * of the vdev referred to by a tx desc, since
3044				 * this vdev might be detached asynchronously
3045				 * in another thread.
3046 *
3047 * Go through tx desc pool to set corresponding
3048 * tx desc's vdev to NULL when detach this vdev,
3049 * and add vdev checking in the ol_tx_desc_free
3050 * to avoid crash.
3051 */
gbian016a42e2017-03-01 18:49:11 +08003052 ol_txrx_tx_desc_reset_vdev(vdev);
Poddar, Siddarth14521792017-03-14 21:19:42 +05303053 ol_txrx_dbg(
Nirav Shah7c8c1712018-09-10 16:01:31 +05303054 "deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) - its last peer is done",
3055 vdev,
Yun Parkeaea8632017-04-09 09:53:45 -07003056 vdev->mac_addr.raw[0],
3057 vdev->mac_addr.raw[1],
3058 vdev->mac_addr.raw[2],
3059 vdev->mac_addr.raw[3],
3060 vdev->mac_addr.raw[4],
3061 vdev->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003062 /* all peers are gone, go ahead and delete it */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303063 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003064 if (vdev_delete_cb)
3065 vdev_delete_cb(vdev_delete_context);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303066 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303067 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003068 }
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303069 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303070 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303071 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003072
jitiphil8ad8a6f2018-03-01 23:45:05 +05303073 ol_txrx_info_high("[%d][%d]: Deleting peer %pK ref_cnt -> %d %s",
Mohit Khannab7bec722017-11-10 11:43:44 -08003074 debug_id,
3075 qdf_atomic_read(&peer->access_list[debug_id]),
3076 peer, rc,
3077 qdf_atomic_read(&peer->fw_create_pending)
3078 == 1 ?
3079 "(No Maps received)" : "");
Mohit Khanna8ee37c62017-08-07 17:15:20 -07003080
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303081 ol_txrx_peer_tx_queue_free(pdev, peer);
3082
Alok Kumare1977442018-11-28 17:16:03 +05303083 /* send peer unmap conf cmd to fw for unmapped peer_ids */
3084 if (pdev->enable_peer_unmap_conf_support)
3085 ol_txrx_send_peer_unmap_conf(pdev, peer);
3086
Deepak Dhamdhereb0d2dda2017-04-03 01:01:50 -07003087 /* Remove mappings from peer_id to peer object */
3088 ol_txrx_peer_clear_map_peer(pdev, peer);
3089
wadesong9f2b1102017-12-20 22:58:35 +08003090 /* Remove peer pointer from local peer ID map */
3091 ol_txrx_local_peer_id_free(pdev, peer);
3092
Mohit Khannab7bec722017-11-10 11:43:44 -08003093 ol_txrx_peer_free_tids(peer);
3094
3095 ol_txrx_dump_peer_access_list(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003096
Alok Kumar8e178242018-06-15 12:49:57 +05303097 qdf_mem_free(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003098 } else {
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003099 access_list = qdf_atomic_read(&peer->access_list[debug_id]);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303100 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Jingxiang Ge3badb982018-01-02 17:39:01 +08003101 if (!ref_silent)
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003102 ol_txrx_info_high("[%d][%d]: ref delete peer %pK ref_cnt -> %d",
3103 debug_id,
3104 access_list,
3105 peer, rc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003106 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003107 return rc;
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003108ERR_STATE:
3109 wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
3110 DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
3111 peer, err_code, qdf_atomic_read(&peer->ref_cnt));
3112 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003113}
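
/*
 * Illustrative reference-handling sketch (hypothetical caller): every
 * ol_txrx_peer_find_hash_find_get_ref() must be balanced by an
 * ol_txrx_peer_release_ref() with the same debug id, as is done in
 * ol_txrx_peer_state_update() above.
 *
 *	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
 *						   PEER_DEBUG_ID_OL_INTERNAL);
 *	if (!peer)
 *		return QDF_STATUS_E_INVAL;
 *	// ... use the peer ...
 *	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 */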
3114
Dhanashri Atre12a08392016-02-17 13:10:34 -08003115/**
Mohit Khanna0696eef2016-04-14 16:14:08 -07003116 * ol_txrx_clear_peer_internal() - ol internal function to clear peer
3117 * @peer: pointer to ol txrx peer structure
3118 *
3119 * Return: QDF Status
3120 */
3121static QDF_STATUS
3122ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
3123{
3124 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3125 /* Drop pending Rx frames in CDS */
3126 if (sched_ctx)
3127 cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3128
3129 /* Purge the cached rx frame queue */
3130 ol_txrx_flush_rx_frames(peer, 1);
3131
3132 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003133 peer->state = OL_TXRX_PEER_STATE_DISC;
3134 qdf_spin_unlock_bh(&peer->peer_info_lock);
3135
3136 return QDF_STATUS_SUCCESS;
3137}
3138
3139/**
3140 * ol_txrx_clear_peer() - clear peer
3141 * @sta_id: sta id
3142 *
3143 * Return: QDF Status
3144 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003145static QDF_STATUS ol_txrx_clear_peer(struct cdp_pdev *ppdev, uint8_t sta_id)
Mohit Khanna0696eef2016-04-14 16:14:08 -07003146{
3147 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003148 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Zhu Jianmin99523042018-06-06 20:01:44 +08003149 QDF_STATUS status;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003150
3151 if (!pdev) {
Zhu Jianmin99523042018-06-06 20:01:44 +08003152 ol_txrx_err("Unable to find pdev!");
Mohit Khanna0696eef2016-04-14 16:14:08 -07003153 return QDF_STATUS_E_FAILURE;
3154 }
3155
3156 if (sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303157 ol_txrx_err("Invalid sta id %d", sta_id);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003158 return QDF_STATUS_E_INVAL;
3159 }
3160
Zhu Jianmin99523042018-06-06 20:01:44 +08003161 peer = ol_txrx_peer_get_ref_by_local_id(ppdev, sta_id,
3162 PEER_DEBUG_ID_OL_INTERNAL);
Kabilan Kannanfa163982018-01-30 12:03:41 -08003163
 3164	/* Return success if the peer has already been cleared by
 3165	 * the data path via the peer detach function.
3166 */
Mohit Khanna0696eef2016-04-14 16:14:08 -07003167 if (!peer)
Kabilan Kannanfa163982018-01-30 12:03:41 -08003168 return QDF_STATUS_SUCCESS;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003169
Zhu Jianmin99523042018-06-06 20:01:44 +08003170 ol_txrx_dbg("Clear peer rx frames: " QDF_MAC_ADDR_STR,
3171 QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw));
3172 ol_txrx_clear_peer_internal(peer);
3173 status = ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003174
Zhu Jianmin99523042018-06-06 20:01:44 +08003175 return status;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003176}
3177
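/**
 * peer_unmap_timer_work_function() - work queued when the peer unmap
 * timer expires
 * @param: peer object pointer (the peer is passed as the work context by
 *	   peer_unmap_timer_handler() below)
 *
 * Dumps the peer access list and the roam debug table, then triggers
 * recovery.
 *
 * Return: none
 */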
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003178void peer_unmap_timer_work_function(void *param)
3179{
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003180 WMA_LOGI("Enter: %s", __func__);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003181 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08003182 ol_txrx_dump_peer_access_list(param);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003183 wlan_roam_debug_dump_table();
Anurag Chouhan4085ff72017-10-05 18:09:56 +05303184 cds_trigger_recovery(QDF_PEER_UNMAP_TIMEDOUT);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003185}
3186
Mohit Khanna0696eef2016-04-14 16:14:08 -07003187/**
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003188 * peer_unmap_timer_handler() - peer unmap timer function
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003189 * @data: peer object pointer
3190 *
3191 * Return: none
3192 */
3193void peer_unmap_timer_handler(void *data)
3194{
3195 ol_txrx_peer_handle peer = (ol_txrx_peer_handle)data;
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003196 ol_txrx_pdev_handle txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003197
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003198 ol_txrx_err("all unmap events not received for peer %pK, ref_cnt %d",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003199 peer, qdf_atomic_read(&peer->ref_cnt));
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003200 ol_txrx_err("peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003201 peer,
3202 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3203 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3204 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Nachiket Kukadea48fd772017-07-28 18:48:57 +05303205 if (!cds_is_driver_recovering() && !cds_is_fw_down()) {
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003206 qdf_create_work(0, &txrx_pdev->peer_unmap_timer_work,
3207 peer_unmap_timer_work_function,
Naveen Rawat17c42a82018-02-01 19:18:27 -08003208 peer);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003209 qdf_sched_work(0, &txrx_pdev->peer_unmap_timer_work);
Deepak Dhamdhered42ab7c2017-04-13 19:32:16 -07003210 } else {
3211 ol_txrx_err("Recovery is in progress, ignore!");
3212 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003213}
3214
3215
3216/**
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003217 * ol_txrx_peer_detach() - Delete a peer's data object.
3218 * @peer - the object to detach
Naveen Rawatf4ada152017-09-05 14:56:12 -07003219 * @bitmap - bitmap indicating special handling of request.
Dhanashri Atre12a08392016-02-17 13:10:34 -08003220 *
3221 * When the host's control SW disassociates a peer, it calls
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003222 * this function to detach and delete the peer. The reference
Dhanashri Atre12a08392016-02-17 13:10:34 -08003223 * stored in the control peer object to the data peer
3224 * object (set up by a call to ol_peer_store()) is provided.
3225 *
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003226 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08003227 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003228static void ol_txrx_peer_detach(void *ppeer, uint32_t bitmap)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003229{
Leo Chang98726762016-10-28 11:07:18 -07003230 ol_txrx_peer_handle peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003231 struct ol_txrx_vdev_t *vdev = peer->vdev;
3232
3233 /* redirect peer's rx delivery function to point to a discard func */
3234 peer->rx_opt_proc = ol_rx_discard;
3235
3236 peer->valid = 0;
3237
Mohit Khanna0696eef2016-04-14 16:14:08 -07003238 /* flush all rx packets before clearing up the peer local_id */
3239 ol_txrx_clear_peer_internal(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003240
3241 /* debug print to dump rx reorder state */
3242 /* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
3243
Abhinav Kumar50d4dc72018-06-15 16:35:50 +05303244 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003245 "%s:peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003246 __func__, peer,
3247 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3248 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3249 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003250
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303251 qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003252 if (vdev->last_real_peer == peer)
3253 vdev->last_real_peer = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303254 qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003255 htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
3256
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003257 /*
 3258	 * set delete_in_progress to indicate that wma
 3259	 * is waiting for the unmap message for this peer
3260 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303261 qdf_atomic_set(&peer->delete_in_progress, 1);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003262
Lin Bai973e6922018-01-08 17:59:19 +08003263 if (!(bitmap & (1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER))) {
Naveen Rawatf4ada152017-09-05 14:56:12 -07003264 if (vdev->opmode == wlan_op_mode_sta) {
3265 qdf_mem_copy(&peer->vdev->last_peer_mac_addr,
3266 &peer->mac_addr,
3267 sizeof(union ol_txrx_align_mac_addr_t));
Abhishek Singh217d9782017-04-28 23:49:11 +05303268
Lin Bai973e6922018-01-08 17:59:19 +08003269 /*
3270 * Create a timer to track unmap events when the
3271 * sta peer gets deleted.
3272 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003273 qdf_timer_start(&peer->peer_unmap_timer,
3274 OL_TXRX_PEER_UNMAP_TIMEOUT);
Mohit Khannab7bec722017-11-10 11:43:44 -08003275 ol_txrx_info_high
3276 ("started peer_unmap_timer for peer %pK",
3277 peer);
Naveen Rawatf4ada152017-09-05 14:56:12 -07003278 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003279 }
3280
3281 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003282 * Remove the reference added during peer_attach.
3283 * The peer will still be left allocated until the
3284 * PEER_UNMAP message arrives to remove the other
3285 * reference, added by the PEER_MAP message.
3286 */
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003287 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003288}
3289
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003290/**
3291 * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
Lin Bai973e6922018-01-08 17:59:19 +08003292 * @ppeer - the object to detach
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003293 *
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003294 * Detach a peer and force peer object to be removed. It is called during
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003295 * roaming scenario when the firmware has already deleted a peer.
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003296 * Remove it from the peer_id_to_object map. Peer object is actually freed
3297 * when last reference is deleted.
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003298 *
3299 * Return: None
3300 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003301static void ol_txrx_peer_detach_force_delete(void *ppeer)
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003302{
Leo Chang98726762016-10-28 11:07:18 -07003303 ol_txrx_peer_handle peer = ppeer;
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003304 ol_txrx_pdev_handle pdev = peer->vdev->pdev;
3305
Nirav Shah7c8c1712018-09-10 16:01:31 +05303306 ol_txrx_info_high("peer %pK, peer->ref_cnt %d",
3307 peer, qdf_atomic_read(&peer->ref_cnt));
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003308
3309 /* Clear the peer_id_to_obj map entries */
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003310 ol_txrx_peer_remove_obj_map_entries(pdev, peer);
Lin Bai973e6922018-01-08 17:59:19 +08003311 ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003312}
3313
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003314/**
Alok Kumare1977442018-11-28 17:16:03 +05303315 * ol_txrx_peer_detach_sync() - peer detach sync callback
3316 * @ppeer - the peer object
3317 * @peer_unmap_sync - peer unmap sync cb.
3318 * @bitmap - bitmap indicating special handling of request.
 3319 *
3321 * Return: None
3322 */
3323static void ol_txrx_peer_detach_sync(void *ppeer,
3324 ol_txrx_peer_unmap_sync_cb peer_unmap_sync,
3325 uint32_t bitmap)
3326{
3327 ol_txrx_peer_handle peer = ppeer;
3328
3329 ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d", __func__,
3330 peer, qdf_atomic_read(&peer->ref_cnt));
3331
3332 peer->peer_unmap_sync_cb = peer_unmap_sync;
3333 ol_txrx_peer_detach(peer, bitmap);
3334}
3335
3336/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003337 * ol_txrx_dump_tx_desc() - dump tx desc total and free count
3338 * @txrx_pdev: Pointer to txrx pdev
3339 *
3340 * Return: none
3341 */
3342static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3343{
3344 struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003345 uint32_t total, num_free;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003346
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303347 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3348 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3349 else
3350 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003351
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003352 num_free = ol_tx_get_total_free_desc(pdev);
3353
Kapil Gupta53d9b572017-06-28 17:53:25 +05303354 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303355 "total tx credit %d num_free %d",
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003356 total, num_free);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003357
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003358}
3359
3360/**
3361 * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3362 * @timeout: timeout in ms
3363 *
 3364 * Wait for the tx queue to become empty; return a timeout error
 3365 * if the queue does not empty before the timeout expires.
3366 *
3367 * Return:
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303368 * QDF_STATUS_SUCCESS if the queue empties,
3369 * QDF_STATUS_E_TIMEOUT in case of timeout,
3370 * QDF_STATUS_E_FAULT in case of missing handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003371 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003372static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003373{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003374 struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003375
3376 if (txrx_pdev == NULL) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303377 ol_txrx_err("txrx context is null");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303378 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003379 }
3380
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003381 while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
Anurag Chouhan512c7d52016-02-19 15:49:46 +05303382 qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003383 if (timeout <= 0) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303384 ol_txrx_err("tx frames are pending");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003385 ol_txrx_dump_tx_desc(txrx_pdev);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303386 return QDF_STATUS_E_TIMEOUT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003387 }
3388 timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3389 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303390 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003391}
3392
3393#ifndef QCA_WIFI_3_0_EMU
Himanshu Agarwal83a87572017-05-25 14:09:50 +05303394#define SUSPEND_DRAIN_WAIT 500
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003395#else
3396#define SUSPEND_DRAIN_WAIT 3000
3397#endif
3398
Yue Ma1e11d792016-02-26 18:58:44 -08003399#ifdef FEATURE_RUNTIME_PM
3400/**
3401 * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
3402 * @txrx_pdev: TXRX pdev context
3403 *
3404 * TXRX is ready to runtime suspend if there are no pending packets
3405 * in the tx queue.
3406 *
3407 * Return: QDF_STATUS
3408 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003409static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003410{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003411 struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07003412
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003413 if (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev))
Yue Ma1e11d792016-02-26 18:58:44 -08003414 return QDF_STATUS_E_BUSY;
3415 else
3416 return QDF_STATUS_SUCCESS;
3417}
3418
3419/**
3420 * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
3421 * @txrx_pdev: TXRX pdev context
3422 *
3423 * This is a dummy function for symmetry.
3424 *
3425 * Return: QDF_STATUS_SUCCESS
3426 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003427static QDF_STATUS ol_txrx_runtime_resume(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003428{
3429 return QDF_STATUS_SUCCESS;
3430}
3431#endif
3432
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003433/**
3434 * ol_txrx_bus_suspend() - bus suspend
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003435 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003436 *
3437 * Ensure that ol_txrx is ready for bus suspend
3438 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303439 * Return: QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003440 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003441static QDF_STATUS ol_txrx_bus_suspend(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003442{
3443 return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3444}
3445
3446/**
3447 * ol_txrx_bus_resume() - bus resume
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003448 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003449 *
 3450 * Dummy function for symmetry.
3451 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303452 * Return: QDF_STATUS_SUCCESS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003453 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003454static QDF_STATUS ol_txrx_bus_resume(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003455{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303456 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003457}
3458
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003459/**
3460 * ol_txrx_get_tx_pending - Get the number of pending transmit
3461 * frames that are awaiting completion.
3462 *
3463 * @pdev - the data physical device object
3464 * Mainly used in clean up path to make sure all buffers have been freed
3465 *
3466 * Return: count of pending frames
3467 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003468int ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003469{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003470 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003471 uint32_t total;
3472
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303473 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3474 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3475 else
3476 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003477
Nirav Shah55b45a02016-01-21 10:00:16 +05303478 return total - ol_tx_get_total_free_desc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003479}
3480
3481void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
3482{
3483 ol_tx_desc_list tx_descs;
Yun Parkeaea8632017-04-09 09:53:45 -07003484 /*
3485 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303486 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Yun Parkeaea8632017-04-09 09:53:45 -07003487	 * which is the same as the normal data send completion path
3488 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003489 htt_tx_pending_discard(pdev_handle->htt_pdev);
3490
3491 TAILQ_INIT(&tx_descs);
3492 ol_tx_queue_discard(pdev_handle, true, &tx_descs);
3493 /* Discard Frames in Discard List */
3494 ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
3495
3496 ol_tx_discard_target_frms(pdev_handle);
3497}
3498
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003499static inline
3500uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
3501{
3502 return (uint64_t) ((size_t) req);
3503}
3504
3505static inline
3506struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
3507{
3508 return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
3509}
3510
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003511#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003512void
3513ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
3514 uint8_t cfg_stats_type, uint32_t cfg_val)
3515{
jitiphil335d2412018-06-07 22:49:24 +05303516 uint8_t dummy_cookie = 0;
Yun Parkeaea8632017-04-09 09:53:45 -07003517
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003518 htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
3519 0 /* reset mask */,
3520 cfg_stats_type, cfg_val, dummy_cookie);
3521}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003522#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003523
jitiphil335d2412018-06-07 22:49:24 +05303524/**
3525 * ol_txrx_fw_stats_desc_pool_init() - Initialize the fw stats descriptor pool
3526 * @pdev: handle to ol txrx pdev
3527 * @pool_size: Size of fw stats descriptor pool
3528 *
3529 * Return: 0 for success, error code on failure.
3530 */
3531int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
3532 uint8_t pool_size)
3533{
3534 int i;
3535
3536 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303537 ol_txrx_err("pdev is NULL");
jitiphil335d2412018-06-07 22:49:24 +05303538 return -EINVAL;
3539 }
3540 pdev->ol_txrx_fw_stats_desc_pool.pool = qdf_mem_malloc(pool_size *
3541 sizeof(struct ol_txrx_fw_stats_desc_elem_t));
Nirav Shah7c8c1712018-09-10 16:01:31 +05303542 if (!pdev->ol_txrx_fw_stats_desc_pool.pool)
jitiphil335d2412018-06-07 22:49:24 +05303543 return -ENOMEM;
Nirav Shah7c8c1712018-09-10 16:01:31 +05303544
jitiphil335d2412018-06-07 22:49:24 +05303545 pdev->ol_txrx_fw_stats_desc_pool.freelist =
3546 &pdev->ol_txrx_fw_stats_desc_pool.pool[0];
3547 pdev->ol_txrx_fw_stats_desc_pool.pool_size = pool_size;
3548
3549 for (i = 0; i < (pool_size - 1); i++) {
3550 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
3551 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
3552 pdev->ol_txrx_fw_stats_desc_pool.pool[i].next =
3553 &pdev->ol_txrx_fw_stats_desc_pool.pool[i + 1];
3554 }
3555 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
3556 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
3557 pdev->ol_txrx_fw_stats_desc_pool.pool[i].next = NULL;
3558 qdf_spinlock_create(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3559 qdf_atomic_init(&pdev->ol_txrx_fw_stats_desc_pool.initialized);
3560 qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 1);
3561 return 0;
3562}
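
/*
 * Illustrative usage sketch (hypothetical attach-path caller): the pool is
 * sized once at pdev attach and torn down with
 * ol_txrx_fw_stats_desc_pool_deinit() at detach.  FW_STATS_DESC_POOL_SIZE
 * is an assumed constant, not defined in this file.
 *
 *	if (ol_txrx_fw_stats_desc_pool_init(pdev, FW_STATS_DESC_POOL_SIZE))
 *		goto attach_fail;
 *	...
 *	ol_txrx_fw_stats_desc_pool_deinit(pdev);
 */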
3563
3564/**
3565 * ol_txrx_fw_stats_desc_pool_deinit() - Deinitialize the
3566 * fw stats descriptor pool
3567 * @pdev: handle to ol txrx pdev
3568 *
3569 * Return: None
3570 */
3571void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev)
3572{
jitiphil335d2412018-06-07 22:49:24 +05303573 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303574 ol_txrx_err("pdev is NULL");
jitiphil335d2412018-06-07 22:49:24 +05303575 return;
3576 }
3577 if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303578 ol_txrx_err("Pool is not initialized");
jitiphil335d2412018-06-07 22:49:24 +05303579 return;
3580 }
3581 if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303582 ol_txrx_err("Pool is not allocated");
jitiphil335d2412018-06-07 22:49:24 +05303583 return;
3584 }
3585 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3586 qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 0);
jitiphil335d2412018-06-07 22:49:24 +05303587 qdf_mem_free(pdev->ol_txrx_fw_stats_desc_pool.pool);
3588 pdev->ol_txrx_fw_stats_desc_pool.pool = NULL;
3589
3590 pdev->ol_txrx_fw_stats_desc_pool.freelist = NULL;
3591 pdev->ol_txrx_fw_stats_desc_pool.pool_size = 0;
3592 qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3593}
3594
3595/**
3596 * ol_txrx_fw_stats_desc_alloc() - Get fw stats descriptor from fw stats
3597 * free descriptor pool
3598 * @pdev: handle to ol txrx pdev
3599 *
3600 * Return: pointer to fw stats descriptor, NULL on failure
3601 */
3602struct ol_txrx_fw_stats_desc_t
3603 *ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev)
3604{
3605 struct ol_txrx_fw_stats_desc_t *desc = NULL;
3606
3607 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3608 if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
3609 qdf_spin_unlock_bh(&pdev->
3610 ol_txrx_fw_stats_desc_pool.pool_lock);
Nirav Shah7c8c1712018-09-10 16:01:31 +05303611 ol_txrx_err("Pool deinitialized");
jitiphil335d2412018-06-07 22:49:24 +05303612 return NULL;
3613 }
3614 if (pdev->ol_txrx_fw_stats_desc_pool.freelist) {
3615 desc = &pdev->ol_txrx_fw_stats_desc_pool.freelist->desc;
3616 pdev->ol_txrx_fw_stats_desc_pool.freelist =
3617 pdev->ol_txrx_fw_stats_desc_pool.freelist->next;
3618 }
3619 qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3620
3621 if (desc)
Nirav Shah7c8c1712018-09-10 16:01:31 +05303622 ol_txrx_dbg("desc_id %d allocated", desc->desc_id);
jitiphil335d2412018-06-07 22:49:24 +05303623 else
Nirav Shah7c8c1712018-09-10 16:01:31 +05303624 ol_txrx_err("fw stats descriptors are exhausted");
jitiphil335d2412018-06-07 22:49:24 +05303625
3626 return desc;
3627}
3628
3629/**
 3630 * ol_txrx_fw_stats_desc_get_req() - Get the request bound to a fw stats
 3631 * descriptor and return that descriptor to the free pool
 3632 * @pdev: handle to ol txrx pdev
 3633 * @desc_id: descriptor ID of the fw stats descriptor
 3634 *
 3635 * Return: pointer to the associated request, NULL on failure
3636 */
3637struct ol_txrx_stats_req_internal
3638 *ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
3639 unsigned char desc_id)
3640{
3641 struct ol_txrx_fw_stats_desc_elem_t *desc_elem;
3642 struct ol_txrx_stats_req_internal *req;
3643
3644 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3645 if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
3646 qdf_spin_unlock_bh(&pdev->
3647 ol_txrx_fw_stats_desc_pool.pool_lock);
Nirav Shah7c8c1712018-09-10 16:01:31 +05303648 ol_txrx_err("Desc ID %u Pool deinitialized", desc_id);
jitiphil335d2412018-06-07 22:49:24 +05303649 return NULL;
3650 }
3651 desc_elem = &pdev->ol_txrx_fw_stats_desc_pool.pool[desc_id];
3652 req = desc_elem->desc.req;
3653 desc_elem->desc.req = NULL;
3654 desc_elem->next =
3655 pdev->ol_txrx_fw_stats_desc_pool.freelist;
3656 pdev->ol_txrx_fw_stats_desc_pool.freelist = desc_elem;
3657 qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3658 return req;
3659}
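
/*
 * Illustrative sketch of the cookie round trip implemented by the two
 * functions above and used by ol_txrx_fw_stats_get()/_handler() below
 * (simplified; locking and error handling omitted):
 *
 *	struct ol_txrx_fw_stats_desc_t *desc;
 *	struct ol_txrx_stats_req_internal *req;
 *
 *	desc = ol_txrx_fw_stats_desc_alloc(pdev);
 *	desc->req = non_volatile_req;
 *	... desc->desc_id travels to the target as the 8-bit HTT cookie ...
 *	req = ol_txrx_fw_stats_desc_get_req(pdev, cookie);
 */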
3660
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003661static A_STATUS
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003662ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
Dhanashri Atre52f71332016-08-22 12:12:36 -07003663 bool per_vdev, bool response_expected)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003664{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003665 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003666 struct ol_txrx_pdev_t *pdev = vdev->pdev;
jitiphil335d2412018-06-07 22:49:24 +05303667 uint8_t cookie = FW_STATS_DESC_POOL_SIZE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003668 struct ol_txrx_stats_req_internal *non_volatile_req;
jitiphil335d2412018-06-07 22:49:24 +05303669 struct ol_txrx_fw_stats_desc_t *desc = NULL;
3670 struct ol_txrx_fw_stats_desc_elem_t *elem = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003671
3672 if (!pdev ||
3673 req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
3674 req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
3675 return A_ERROR;
3676 }
3677
3678 /*
3679 * Allocate a non-transient stats request object.
3680 * (The one provided as an argument is likely allocated on the stack.)
3681 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303682 non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003683 if (!non_volatile_req)
3684 return A_NO_MEMORY;
3685
3686 /* copy the caller's specifications */
3687 non_volatile_req->base = *req;
3688 non_volatile_req->serviced = 0;
3689 non_volatile_req->offset = 0;
tfyu9fcabd72017-09-26 17:46:48 +08003690 if (response_expected) {
jitiphil335d2412018-06-07 22:49:24 +05303691 desc = ol_txrx_fw_stats_desc_alloc(pdev);
3692 if (!desc) {
3693 qdf_mem_free(non_volatile_req);
3694 return A_ERROR;
3695 }
3696
3697 /* use the desc id as the cookie */
3698 cookie = desc->desc_id;
3699 desc->req = non_volatile_req;
tfyu9fcabd72017-09-26 17:46:48 +08003700 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3701 TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
3702 pdev->req_list_depth++;
3703 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
3704 }
3705
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003706 if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
3707 req->stats_type_upload_mask,
3708 req->stats_type_reset_mask,
3709 HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
3710 cookie)) {
tfyu9fcabd72017-09-26 17:46:48 +08003711 if (response_expected) {
3712 qdf_spin_lock_bh(&pdev->req_list_spinlock);
jitiphil335d2412018-06-07 22:49:24 +05303713 TAILQ_REMOVE(&pdev->req_list, non_volatile_req,
3714 req_list_elem);
tfyu9fcabd72017-09-26 17:46:48 +08003715 pdev->req_list_depth--;
3716 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
jitiphil335d2412018-06-07 22:49:24 +05303717 if (desc) {
3718 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.
3719 pool_lock);
3720 desc->req = NULL;
3721 elem = container_of(desc,
3722 struct ol_txrx_fw_stats_desc_elem_t,
3723 desc);
3724 elem->next =
3725 pdev->ol_txrx_fw_stats_desc_pool.freelist;
3726 pdev->ol_txrx_fw_stats_desc_pool.freelist = elem;
3727 qdf_spin_unlock_bh(&pdev->
3728 ol_txrx_fw_stats_desc_pool.
3729 pool_lock);
3730 }
tfyu9fcabd72017-09-26 17:46:48 +08003731 }
3732
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303733 qdf_mem_free(non_volatile_req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003734 return A_ERROR;
3735 }
3736
Nirav Shahd2310422016-01-21 18:58:06 +05303737 if (response_expected == false)
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303738 qdf_mem_free(non_volatile_req);
Nirav Shahd2310422016-01-21 18:58:06 +05303739
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003740 return A_OK;
3741}
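
/*
 * Illustrative caller-side sketch (the real callers live outside this
 * file, e.g. in the HDD stats path - that call site is an assumption;
 * the stats type and print flags below are arbitrary examples):
 *
 *	struct ol_txrx_stats_req req = {0};
 *
 *	req.stats_type_upload_mask = 1 << HTT_DBG_STATS_WAL_PDEV_TXRX;
 *	req.print.verbose = 1;
 *	if (ol_txrx_fw_stats_get(vdev, &req, true, true) != A_OK)
 *		return;	// request was not queued; no response will arrive
 */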
Dhanashri Atre12a08392016-02-17 13:10:34 -08003742
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003743void
3744ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
jitiphil335d2412018-06-07 22:49:24 +05303745 uint8_t cookie, uint8_t *stats_info_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003746{
3747 enum htt_dbg_stats_type type;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07003748 enum htt_cmn_dbg_stats_type cmn_type = HTT_DBG_CMN_NUM_STATS_INVALID;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003749 enum htt_dbg_stats_status status;
3750 int length;
3751 uint8_t *stats_data;
tfyu9fcabd72017-09-26 17:46:48 +08003752 struct ol_txrx_stats_req_internal *req, *tmp;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003753 int more = 0;
tfyu9fcabd72017-09-26 17:46:48 +08003754 int found = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003755
jitiphil335d2412018-06-07 22:49:24 +05303756 if (cookie >= FW_STATS_DESC_POOL_SIZE) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05303757 ol_txrx_err("Cookie is not valid");
jitiphil335d2412018-06-07 22:49:24 +05303758 return;
3759 }
3760 req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie);
3761 if (!req) {
3762 ol_txrx_err("%s: Request not retrieved for cookie %u", __func__,
3763 (uint8_t)cookie);
3764 return;
3765 }
tfyu9fcabd72017-09-26 17:46:48 +08003766 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3767 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
3768 if (req == tmp) {
3769 found = 1;
3770 break;
3771 }
3772 }
3773 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
3774
3775 if (!found) {
3776 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05303777 "req(%pK) from firmware can't be found in the list\n", req);
tfyu9fcabd72017-09-26 17:46:48 +08003778 return;
3779 }
3780
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003781 do {
3782 htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
3783 &length, &stats_data);
3784 if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
3785 break;
3786 if (status == HTT_DBG_STATS_STATUS_PRESENT ||
3787 status == HTT_DBG_STATS_STATUS_PARTIAL) {
3788 uint8_t *buf;
3789 int bytes = 0;
3790
3791 if (status == HTT_DBG_STATS_STATUS_PARTIAL)
3792 more = 1;
3793 if (req->base.print.verbose || req->base.print.concise)
3794 /* provide the header along with the data */
3795 htt_t2h_stats_print(stats_info_list,
3796 req->base.print.concise);
3797
3798 switch (type) {
3799 case HTT_DBG_STATS_WAL_PDEV_TXRX:
3800 bytes = sizeof(struct wlan_dbg_stats);
3801 if (req->base.copy.buf) {
3802 int lmt;
3803
3804 lmt = sizeof(struct wlan_dbg_stats);
3805 if (req->base.copy.byte_limit < lmt)
3806 lmt = req->base.copy.byte_limit;
3807 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303808 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003809 }
3810 break;
3811 case HTT_DBG_STATS_RX_REORDER:
3812 bytes = sizeof(struct rx_reorder_stats);
3813 if (req->base.copy.buf) {
3814 int lmt;
3815
3816 lmt = sizeof(struct rx_reorder_stats);
3817 if (req->base.copy.byte_limit < lmt)
3818 lmt = req->base.copy.byte_limit;
3819 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303820 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003821 }
3822 break;
3823 case HTT_DBG_STATS_RX_RATE_INFO:
3824 bytes = sizeof(wlan_dbg_rx_rate_info_t);
3825 if (req->base.copy.buf) {
3826 int lmt;
3827
3828 lmt = sizeof(wlan_dbg_rx_rate_info_t);
3829 if (req->base.copy.byte_limit < lmt)
3830 lmt = req->base.copy.byte_limit;
3831 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303832 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003833 }
3834 break;
3835
3836 case HTT_DBG_STATS_TX_RATE_INFO:
3837 bytes = sizeof(wlan_dbg_tx_rate_info_t);
3838 if (req->base.copy.buf) {
3839 int lmt;
3840
3841 lmt = sizeof(wlan_dbg_tx_rate_info_t);
3842 if (req->base.copy.byte_limit < lmt)
3843 lmt = req->base.copy.byte_limit;
3844 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303845 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003846 }
3847 break;
3848
3849 case HTT_DBG_STATS_TX_PPDU_LOG:
3850 bytes = 0;
3851 /* TO DO: specify how many bytes are present */
3852 /* TO DO: add copying to the requestor's buf */
3853
3854 case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07003855 bytes = sizeof(struct
3856 rx_remote_buffer_mgmt_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003857 if (req->base.copy.buf) {
3858 int limit;
3859
Yun Parkeaea8632017-04-09 09:53:45 -07003860 limit = sizeof(struct
3861 rx_remote_buffer_mgmt_stats);
3862 if (req->base.copy.byte_limit < limit)
3863 limit = req->base.copy.
3864 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003865 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303866 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003867 }
3868 break;
3869
3870 case HTT_DBG_STATS_TXBF_INFO:
3871 bytes = sizeof(struct wlan_dbg_txbf_data_stats);
3872 if (req->base.copy.buf) {
3873 int limit;
3874
Yun Parkeaea8632017-04-09 09:53:45 -07003875 limit = sizeof(struct
3876 wlan_dbg_txbf_data_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003877 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003878 limit = req->base.copy.
3879 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003880 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303881 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003882 }
3883 break;
3884
3885 case HTT_DBG_STATS_SND_INFO:
3886 bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
3887 if (req->base.copy.buf) {
3888 int limit;
3889
Yun Parkeaea8632017-04-09 09:53:45 -07003890 limit = sizeof(struct
3891 wlan_dbg_txbf_snd_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003892 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003893 limit = req->base.copy.
3894 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003895 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303896 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003897 }
3898 break;
3899
3900 case HTT_DBG_STATS_TX_SELFGEN_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07003901 bytes = sizeof(struct
3902 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003903 if (req->base.copy.buf) {
3904 int limit;
3905
Yun Parkeaea8632017-04-09 09:53:45 -07003906 limit = sizeof(struct
3907 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003908 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003909 limit = req->base.copy.
3910 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003911 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303912 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003913 }
3914 break;
3915
3916 case HTT_DBG_STATS_ERROR_INFO:
3917 bytes =
3918 sizeof(struct wlan_dbg_wifi2_error_stats);
3919 if (req->base.copy.buf) {
3920 int limit;
3921
Yun Parkeaea8632017-04-09 09:53:45 -07003922 limit = sizeof(struct
3923 wlan_dbg_wifi2_error_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003924 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003925 limit = req->base.copy.
3926 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003927 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303928 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003929 }
3930 break;
3931
3932 case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
3933 bytes =
3934 sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
3935 if (req->base.copy.buf) {
3936 int limit;
3937
3938 limit = sizeof(struct
3939 rx_txbf_musu_ndpa_pkts_stats);
3940 if (req->base.copy.byte_limit < limit)
3941 limit =
3942 req->base.copy.byte_limit;
3943 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303944 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003945 }
3946 break;
3947
3948 default:
3949 break;
3950 }
Yun Parkeaea8632017-04-09 09:53:45 -07003951 buf = req->base.copy.buf ?
3952 req->base.copy.buf : stats_data;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07003953
3954 /* Not implemented for MCL */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003955 if (req->base.callback.fp)
3956 req->base.callback.fp(req->base.callback.ctxt,
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07003957 cmn_type, buf, bytes);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003958 }
3959 stats_info_list += length;
3960 } while (1);
3961
3962 if (!more) {
tfyu9fcabd72017-09-26 17:46:48 +08003963 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3964 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
3965 if (req == tmp) {
3966 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
3967 pdev->req_list_depth--;
3968 qdf_mem_free(req);
3969 break;
3970 }
3971 }
3972 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003973 }
3974}
3975
3976#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
3977int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
3978{
3979 if (debug_specs & TXRX_DBG_MASK_OBJS) {
3980#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
3981 ol_txrx_pdev_display(vdev->pdev, 0);
3982#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303983 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303984 "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003985#endif
3986 }
Yun Parkeaea8632017-04-09 09:53:45 -07003987 if (debug_specs & TXRX_DBG_MASK_STATS)
Mohit Khannaca4173b2017-09-12 21:52:19 -07003988 ol_txrx_stats_display(vdev->pdev,
3989 QDF_STATS_VERBOSITY_LEVEL_HIGH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003990 if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
3991#if defined(ENABLE_TXRX_PROT_ANALYZE)
3992 ol_txrx_prot_ans_display(vdev->pdev);
3993#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303994 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303995 "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003996#endif
3997 }
3998 if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
3999#if defined(ENABLE_RX_REORDER_TRACE)
4000 ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
4001#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304002 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304003 "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004004#endif
4005
4006 }
4007 return 0;
4008}
4009#endif
4010
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004011#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004012int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
4013 int max_subfrms_ampdu, int max_subfrms_amsdu)
4014{
4015 return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
4016 max_subfrms_ampdu, max_subfrms_amsdu);
4017}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004018#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004019
4020#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4021void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
4022{
4023 struct ol_txrx_vdev_t *vdev;
4024
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304025 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004026 "%*s%s:\n", indent, " ", "txrx pdev");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304027 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004028 "%*spdev object: %pK", indent + 4, " ", pdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304029 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004030 "%*svdev list:", indent + 4, " ");
4031 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304032 ol_txrx_vdev_display(vdev, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004033 }
4034 ol_txrx_peer_find_display(pdev, indent + 4);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304035 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004036 "%*stx desc pool: %d elems @ %pK", indent + 4, " ",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004037 pdev->tx_desc.pool_size, pdev->tx_desc.array);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304038 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004039 htt_display(pdev->htt_pdev, indent);
4040}
4041
4042void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
4043{
4044 struct ol_txrx_peer_t *peer;
4045
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304046 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004047 "%*stxrx vdev: %pK\n", indent, " ", vdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304048 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004049 "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304050 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004051 "%*sMAC addr: %d:%d:%d:%d:%d:%d",
4052 indent + 4, " ",
4053 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
4054 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
4055 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304056 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004057 "%*speer list:", indent + 4, " ");
4058 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304059 ol_txrx_peer_display(peer, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004060 }
4061}
4062
4063void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
4064{
4065 int i;
4066
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304067 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004068 "%*stxrx peer: %pK", indent, " ", peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004069 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
4070 if (peer->peer_ids[i] != HTT_INVALID_PEER) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304071 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004072 "%*sID: %d", indent + 4, " ",
4073 peer->peer_ids[i]);
4074 }
4075 }
4076}
4077#endif /* TXRX_DEBUG_LEVEL */
4078
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004079/**
 4080 * ol_txrx_stats() - write OL TXRX queue stats into a caller buffer
 4081 * @vdev_id: vdev id whose ll_pause queue stats are reported
4082 * @buffer: pointer to buffer
4083 * @buf_len: length of the buffer
4084 *
4085 * Return: length of string
4086 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004087static int
Yun Parkeaea8632017-04-09 09:53:45 -07004088ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004089{
4090 uint32_t len = 0;
4091
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004092 struct ol_txrx_vdev_t *vdev =
4093 (struct ol_txrx_vdev_t *)
4094 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004095
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004096 if (!vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304097 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304098 "%s: vdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004099 snprintf(buffer, buf_len, "vdev not found");
4100 return len;
4101 }
4102
4103 len = scnprintf(buffer, buf_len,
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004104 "\n\nTXRX stats:\nllQueue State : %s\npause %u unpause %u\noverflow %u\nllQueue timer state : %s",
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304105 ((vdev->ll_pause.is_q_paused == false) ?
4106 "UNPAUSED" : "PAUSED"),
4107 vdev->ll_pause.q_pause_cnt,
4108 vdev->ll_pause.q_unpause_cnt,
4109 vdev->ll_pause.q_overflow_cnt,
4110 ((vdev->ll_pause.is_q_timer_on == false)
4111 ? "NOT-RUNNING" : "RUNNING"));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004112 return len;
4113}
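
/*
 * Illustrative sketch: this static helper is reached through the txrx ops
 * registered elsewhere in this file (an assumption here); conceptually a
 * consumer hands in a flat character buffer, e.g.:
 *
 *	char buf[256];
 *
 *	if (ol_txrx_stats(vdev_id, buf, sizeof(buf)))
 *		qdf_print("%s", buf);
 */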
4114
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004115#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
4116/**
4117 * ol_txrx_disp_peer_cached_bufq_stats() - display peer cached_bufq stats
4118 * @peer: peer pointer
4119 *
4120 * Return: None
4121 */
4122static void ol_txrx_disp_peer_cached_bufq_stats(struct ol_txrx_peer_t *peer)
4123{
Nirav Shahe6194ac2018-07-13 11:04:41 +05304124 txrx_nofl_info("cached_bufq: curr %d drops %d hwm %d whatifs %d thresh %d",
4125 peer->bufq_info.curr,
4126 peer->bufq_info.dropped,
4127 peer->bufq_info.high_water_mark,
4128 peer->bufq_info.qdepth_no_thresh,
4129 peer->bufq_info.thresh);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004130}
4131
4132/**
4133 * ol_txrx_disp_peer_stats() - display peer stats
4134 * @pdev: pdev pointer
4135 *
4136 * Return: None
4137 */
4138static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
 4139{
	int i;
4140 struct ol_txrx_peer_t *peer;
4141 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
4142
4143 if (osc && hif_is_load_or_unload_in_progress(HIF_GET_SOFTC(osc)))
4144 return;
4145
4146 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
Manjunathappa Prakasha4272ab2018-09-17 11:39:44 -07004147 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004148 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4149 peer = pdev->local_peer_ids.map[i];
Frank Liu4362e462018-01-16 11:51:55 +08004150 if (peer) {
Mohit Khannab7bec722017-11-10 11:43:44 -08004151 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Frank Liu4362e462018-01-16 11:51:55 +08004152 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004153 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Manjunathappa Prakasha4272ab2018-09-17 11:39:44 -07004154 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004155
4156 if (peer) {
Nirav Shahe6194ac2018-07-13 11:04:41 +05304157 txrx_nofl_info("stats: peer 0x%pK local peer id %d",
4158 peer, i);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004159 ol_txrx_disp_peer_cached_bufq_stats(peer);
Mohit Khannab7bec722017-11-10 11:43:44 -08004160 ol_txrx_peer_release_ref(peer,
4161 PEER_DEBUG_ID_OL_INTERNAL);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004162 }
4163 }
4164}
4165#else
4166static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4167{
Nirav Shahe6194ac2018-07-13 11:04:41 +05304168 txrx_nofl_info("peer stats not supported w/o QCA_SUPPORT_TXRX_LOCAL_PEER_ID");
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004169}
4170#endif
4171
Mohit Khannaca4173b2017-09-12 21:52:19 -07004172void ol_txrx_stats_display(ol_txrx_pdev_handle pdev,
4173 enum qdf_stats_verbosity_level level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004174{
Mohit Khannaca4173b2017-09-12 21:52:19 -07004175 u64 tx_dropped =
4176 pdev->stats.pub.tx.dropped.download_fail.pkts
4177 + pdev->stats.pub.tx.dropped.target_discard.pkts
4178 + pdev->stats.pub.tx.dropped.no_ack.pkts
4179 + pdev->stats.pub.tx.dropped.others.pkts;
4180
4181 if (level == QDF_STATS_VERBOSITY_LEVEL_LOW) {
Nirav Shahe6194ac2018-07-13 11:04:41 +05304182 txrx_nofl_dbg("STATS |%u %u|TX: %lld tso %lld ok %lld drops(%u-%lld %u-%lld %u-%lld ?-%lld hR-%lld)|RX: %lld drops(E %lld PI %lld ME %lld) fwd(S %d F %d SF %d)|",
4183 pdev->tx_desc.num_free,
4184 pdev->tx_desc.pool_size,
4185 pdev->stats.pub.tx.from_stack.pkts,
4186 pdev->stats.pub.tx.tso.tso_pkts.pkts,
4187 pdev->stats.pub.tx.delivered.pkts,
4188 htt_tx_status_download_fail,
4189 pdev->stats.pub.tx.dropped.download_fail.pkts,
4190 htt_tx_status_discard,
4191 pdev->stats.pub.tx.dropped.
4192 target_discard.pkts,
4193 htt_tx_status_no_ack,
4194 pdev->stats.pub.tx.dropped.no_ack.pkts,
4195 pdev->stats.pub.tx.dropped.others.pkts,
4196 pdev->stats.pub.tx.dropped.host_reject.pkts,
4197 pdev->stats.pub.rx.delivered.pkts,
4198 pdev->stats.pub.rx.dropped_err.pkts,
4199 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4200 pdev->stats.pub.rx.dropped_mic_err.pkts,
4201 pdev->stats.pub.rx.intra_bss_fwd.
4202 packets_stack,
4203 pdev->stats.pub.rx.intra_bss_fwd.
4204 packets_fwd,
4205 pdev->stats.pub.rx.intra_bss_fwd.
4206 packets_stack_n_fwd);
Mohit Khannaca4173b2017-09-12 21:52:19 -07004207 return;
4208 }
4209
Nirav Shahe6194ac2018-07-13 11:04:41 +05304210 txrx_nofl_info("TX PATH Statistics:");
4211 txrx_nofl_info("sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
4212 pdev->stats.pub.tx.from_stack.pkts,
4213 pdev->stats.pub.tx.from_stack.bytes,
4214 pdev->stats.pub.tx.dropped.host_reject.pkts,
4215 pdev->stats.pub.tx.dropped.host_reject.bytes,
4216 tx_dropped,
4217 pdev->stats.pub.tx.dropped.download_fail.bytes
4218 + pdev->stats.pub.tx.dropped.target_discard.bytes
 4219		  + pdev->stats.pub.tx.dropped.no_ack.bytes
		  + pdev->stats.pub.tx.dropped.others.bytes);
4220 txrx_nofl_info("successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B) others: %lld (%lld B)",
4221 pdev->stats.pub.tx.delivered.pkts,
4222 pdev->stats.pub.tx.delivered.bytes,
4223 pdev->stats.pub.tx.dropped.download_fail.pkts,
4224 pdev->stats.pub.tx.dropped.download_fail.bytes,
4225 pdev->stats.pub.tx.dropped.target_discard.pkts,
4226 pdev->stats.pub.tx.dropped.target_discard.bytes,
4227 pdev->stats.pub.tx.dropped.no_ack.pkts,
4228 pdev->stats.pub.tx.dropped.no_ack.bytes,
4229 pdev->stats.pub.tx.dropped.others.pkts,
4230 pdev->stats.pub.tx.dropped.others.bytes);
4231 txrx_nofl_info("Tx completions per HTT message:\n"
4232 "Single Packet %d\n"
4233 " 2-10 Packets %d\n"
4234 "11-20 Packets %d\n"
4235 "21-30 Packets %d\n"
4236 "31-40 Packets %d\n"
4237 "41-50 Packets %d\n"
4238 "51-60 Packets %d\n"
4239 " 60+ Packets %d\n",
4240 pdev->stats.pub.tx.comp_histogram.pkts_1,
4241 pdev->stats.pub.tx.comp_histogram.pkts_2_10,
4242 pdev->stats.pub.tx.comp_histogram.pkts_11_20,
4243 pdev->stats.pub.tx.comp_histogram.pkts_21_30,
4244 pdev->stats.pub.tx.comp_histogram.pkts_31_40,
4245 pdev->stats.pub.tx.comp_histogram.pkts_41_50,
4246 pdev->stats.pub.tx.comp_histogram.pkts_51_60,
4247 pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
Nirav Shahda008342016-05-17 18:50:40 +05304248
Nirav Shahe6194ac2018-07-13 11:04:41 +05304249 txrx_nofl_info("RX PATH Statistics:");
4250 txrx_nofl_info("%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
4251 "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
4252 "msdus with frag_ind: %d msdus with offload_ind: %d",
4253 pdev->stats.priv.rx.normal.ppdus,
4254 pdev->stats.priv.rx.normal.mpdus,
4255 pdev->stats.pub.rx.delivered.pkts,
4256 pdev->stats.pub.rx.delivered.bytes,
4257 pdev->stats.pub.rx.dropped_err.pkts,
4258 pdev->stats.pub.rx.dropped_err.bytes,
4259 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4260 pdev->stats.pub.rx.dropped_peer_invalid.bytes,
4261 pdev->stats.pub.rx.dropped_mic_err.pkts,
4262 pdev->stats.pub.rx.dropped_mic_err.bytes,
4263 pdev->stats.pub.rx.msdus_with_frag_ind,
4264 pdev->stats.pub.rx.msdus_with_offload_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004265
Nirav Shahe6194ac2018-07-13 11:04:41 +05304266 txrx_nofl_info(" fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
4267 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4268 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4269 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304270
Nirav Shahe6194ac2018-07-13 11:04:41 +05304271 txrx_nofl_info("packets per HTT message:\n"
4272 "Single Packet %d\n"
4273 " 2-10 Packets %d\n"
4274 "11-20 Packets %d\n"
4275 "21-30 Packets %d\n"
4276 "31-40 Packets %d\n"
4277 "41-50 Packets %d\n"
4278 "51-60 Packets %d\n"
4279 " 60+ Packets %d\n",
4280 pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4281 pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4282 pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4283 pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4284 pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4285 pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4286 pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4287 pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004288
4289 ol_txrx_disp_peer_stats(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004290}
4291
4292void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4293{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304294 qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004295}
4296
4297#if defined(ENABLE_TXRX_PROT_ANALYZE)
4298
4299void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4300{
4301 ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4302 ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4303}
4304
4305#endif /* ENABLE_TXRX_PROT_ANALYZE */
4306
4307#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
4308int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4309{
4310 return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4311 OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4312}
4313#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4314
4315#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
4316A_STATUS
4317ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4318 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4319{
Anurag Chouhanc5548422016-02-24 18:33:27 +05304320 qdf_assert(pdev && peer && stats);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304321 qdf_spin_lock_bh(&pdev->peer_stat_mutex);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304322 qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304323 qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004324 return A_OK;
4325}
4326#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4327
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004328static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *pvdev, bool val)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004329{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004330 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004331
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004332 if (NULL == vdev)
4333 return;
4334
4335 vdev->disable_intrabss_fwd = val;
4336}
4337
Nirav Shahc657ef52016-07-26 14:22:38 +05304338/**
4339 * ol_txrx_update_mac_id() - update mac_id for vdev
4340 * @vdev_id: vdev id
4341 * @mac_id: mac id
4342 *
4343 * Return: none
4344 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004345static void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id)
Nirav Shahc657ef52016-07-26 14:22:38 +05304346{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004347 struct ol_txrx_vdev_t *vdev =
4348 (struct ol_txrx_vdev_t *)
4349 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Nirav Shahc657ef52016-07-26 14:22:38 +05304350
4351 if (NULL == vdev) {
4352 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4353 "%s: Invalid vdev_id %d", __func__, vdev_id);
4354 return;
4355 }
4356 vdev->mac_id = mac_id;
4357}
4358
Alok Kumar75355aa2018-03-19 17:32:58 +05304359/**
 4360 * ol_txrx_get_tx_ack_stats() - get tx ack success count
4361 * @vdev_id: vdev_id
4362 *
4363 * Return: tx ack count
4364 */
4365static uint32_t ol_txrx_get_tx_ack_stats(uint8_t vdev_id)
4366{
4367 struct ol_txrx_vdev_t *vdev =
4368 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
4369 if (!vdev) {
4370 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4371 "%s: Invalid vdev_id %d", __func__, vdev_id);
4372 return 0;
4373 }
4374 return vdev->txrx_stats.txack_success;
4375}
4376
Leo Chang8e073612015-11-13 10:55:34 -08004377/**
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004378 * ol_txrx_display_stats() - Display OL TXRX stats
 * @soc: opaque soc handle (unused by this implementation)
 4379 * @value: Module id for which stats need to be displayed
 * @verb_level: verbosity level for the path stats
Nirav Shahda008342016-05-17 18:50:40 +05304380 *
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004381 * Return: status
Nirav Shahda008342016-05-17 18:50:40 +05304382 */
Mohit Khannaca4173b2017-09-12 21:52:19 -07004383static QDF_STATUS
4384ol_txrx_display_stats(void *soc, uint16_t value,
4385 enum qdf_stats_verbosity_level verb_level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004386{
4387 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004388 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004389
Anurag Chouhan6d760662016-02-20 16:05:43 +05304390 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004391 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304392 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304393 "%s: pdev is NULL", __func__);
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004394 return QDF_STATUS_E_NULL_VALUE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004395 }
4396
4397 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004398 case CDP_TXRX_PATH_STATS:
Mohit Khannaca4173b2017-09-12 21:52:19 -07004399 ol_txrx_stats_display(pdev, verb_level);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004400 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004401 case CDP_TXRX_TSO_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004402 ol_txrx_stats_display_tso(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004403 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004404 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07004405 ol_tx_dump_flow_pool_info((void *)pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004406 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004407 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304408 qdf_nbuf_tx_desc_count_display();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004409 break;
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -08004410 case CDP_WLAN_RX_BUF_DEBUG_STATS:
4411 htt_display_rx_buf_debug(pdev->htt_pdev);
4412 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304413#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004414 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304415 ol_tx_sched_cur_state_display(pdev);
4416 ol_tx_sched_stats_display(pdev);
4417 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004418 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304419 ol_tx_queue_log_display(pdev);
4420 break;
4421#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004422 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304423 ol_tx_dump_group_credit_stats(pdev);
4424 break;
4425#endif
4426
4427#ifdef DEBUG_HL_LOGGING
Nirav Shaheb017be2018-02-15 11:20:58 +05304428 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304429 htt_dump_bundle_stats(pdev->htt_pdev);
4430 break;
4431#endif
4432#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004433 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004434 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004435 break;
4436 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004437 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004438}
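
/*
 * Illustrative sketch: dumping the low-verbosity data path counters.
 * The soc argument is not used by this OL implementation, so NULL is
 * passed here purely for illustration:
 *
 *	ol_txrx_display_stats(NULL, CDP_TXRX_PATH_STATS,
 *			      QDF_STATS_VERBOSITY_LEVEL_LOW);
 */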
4439
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004440/**
4441 * ol_txrx_clear_stats() - Clear OL TXRX stats
 4442 * @value: Module id for which stats need to be cleared
4443 *
4444 * Return: None
4445 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004446static void ol_txrx_clear_stats(uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004447{
4448 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004449 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004450
Anurag Chouhan6d760662016-02-20 16:05:43 +05304451 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004452 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304453 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304454 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004455 return;
4456 }
4457
4458 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004459 case CDP_TXRX_PATH_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004460 ol_txrx_stats_clear(pdev);
4461 break;
Yun Park1027e8c2017-10-13 15:17:37 -07004462 case CDP_TXRX_TSO_STATS:
4463 ol_txrx_tso_stats_clear(pdev);
4464 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004465 case CDP_DUMP_TX_FLOW_POOL_INFO:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004466 ol_tx_clear_flow_pool_stats();
4467 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004468 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304469 qdf_nbuf_tx_desc_count_clear();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004470 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304471#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004472 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304473 ol_tx_sched_stats_clear(pdev);
4474 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004475 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304476 ol_tx_queue_log_clear(pdev);
4477 break;
4478#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004479 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304480 ol_tx_clear_group_credit_stats(pdev);
4481 break;
4482#endif
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004483 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304484 htt_clear_bundle_stats(pdev->htt_pdev);
4485 break;
4486#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004487 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004488 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004489 break;
4490 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004491
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004492}
4493
4494/**
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004495 * ol_txrx_drop_nbuf_list() - drop an nbuf list
 4496 * @buf_list: buffer list to be dropped
4497 *
4498 * Return: int (number of bufs dropped)
4499 */
4500static inline int ol_txrx_drop_nbuf_list(qdf_nbuf_t buf_list)
4501{
4502 int num_dropped = 0;
4503 qdf_nbuf_t buf, next_buf;
4504 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
4505
4506 buf = buf_list;
4507 while (buf) {
Himanshu Agarwaldd2196a2017-07-31 11:38:14 +05304508 QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004509 next_buf = qdf_nbuf_queue_next(buf);
4510 if (pdev)
4511 TXRX_STATS_MSDU_INCR(pdev,
4512 rx.dropped_peer_invalid, buf);
4513 qdf_nbuf_free(buf);
4514 buf = next_buf;
4515 num_dropped++;
4516 }
4517 return num_dropped;
4518}
4519
4520/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004521 * ol_rx_data_cb() - data rx callback
 4522 * @pdev: txrx pdev handle
4523 * @buf_list: buffer list
Nirav Shah36a87bf2016-02-22 12:38:46 +05304524 * @staid: Station id
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004525 *
4526 * Return: None
4527 */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304528static void ol_rx_data_cb(struct ol_txrx_pdev_t *pdev,
4529 qdf_nbuf_t buf_list, uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004530{
Mohit Khanna0696eef2016-04-14 16:14:08 -07004531 void *osif_dev;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004532 uint8_t drop_count = 0;
Nirav Shahcbc6d722016-03-01 16:24:53 +05304533 qdf_nbuf_t buf, next_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304534 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004535 ol_txrx_rx_fp data_rx = NULL;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304536 struct ol_txrx_peer_t *peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004537
Jeff Johnsondac9e382017-09-24 10:36:08 -07004538 if (qdf_unlikely(!pdev))
Nirav Shah36a87bf2016-02-22 12:38:46 +05304539 goto free_buf;
4540
4541 /* Do not use peer directly. Derive peer from staid to
4542 * make sure that peer is valid.
4543 */
Jingxiang Ge3badb982018-01-02 17:39:01 +08004544 peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev,
4545 staid, PEER_DEBUG_ID_OL_RX_THREAD);
Nirav Shah36a87bf2016-02-22 12:38:46 +05304546 if (!peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004547 goto free_buf;
4548
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304549 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre50141c52016-04-07 13:15:29 -07004550 if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
4551 !peer->vdev->rx)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304552 qdf_spin_unlock_bh(&peer->peer_info_lock);
Jingxiang Ge9f297062018-01-24 13:31:31 +08004553 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004554 goto free_buf;
4555 }
Dhanashri Atre182b0272016-02-17 15:35:07 -08004556
4557 data_rx = peer->vdev->rx;
Mohit Khanna0696eef2016-04-14 16:14:08 -07004558 osif_dev = peer->vdev->osif_dev;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304559 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004560
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004561 qdf_spin_lock_bh(&peer->bufq_info.bufq_lock);
4562 if (!list_empty(&peer->bufq_info.cached_bufq)) {
4563 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004564 /* Flush the cached frames to HDD before passing new rx frame */
4565 ol_txrx_flush_rx_frames(peer, 0);
4566 } else
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004567 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004568
Jingxiang Ge3badb982018-01-02 17:39:01 +08004569 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
4570
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004571 buf = buf_list;
4572 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304573 next_buf = qdf_nbuf_queue_next(buf);
4574 qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
Mohit Khanna0696eef2016-04-14 16:14:08 -07004575 ret = data_rx(osif_dev, buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304576 if (ret != QDF_STATUS_SUCCESS) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304577 ol_txrx_err("Frame Rx to HDD failed");
Nirav Shah6a4eee62016-04-25 10:15:04 +05304578 if (pdev)
4579 TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304580 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004581 }
4582 buf = next_buf;
4583 }
4584 return;
4585
4586free_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004587 drop_count = ol_txrx_drop_nbuf_list(buf_list);
Nirav Shah7c8c1712018-09-10 16:01:31 +05304588 ol_txrx_warn("Dropped frames %u", drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004589}
4590
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004591/* print for every 16th packet */
4592#define OL_TXRX_PRINT_RATE_LIMIT_THRESH 0x0f
4593struct ol_rx_cached_buf *cache_buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304594
4595/** helper function to drop packets
4596 * Note: caller must hold the cached buq lock before invoking
4597 * this function. Also, it assumes that the pointers passed in
4598 * are valid (non-NULL)
4599 */
4600static inline void ol_txrx_drop_frames(
4601 struct ol_txrx_cached_bufq_t *bufqi,
4602 qdf_nbuf_t rx_buf_list)
4603{
4604 uint32_t dropped = ol_txrx_drop_nbuf_list(rx_buf_list);
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004605
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304606 bufqi->dropped += dropped;
4607 bufqi->qdepth_no_thresh += dropped;
4608
4609 if (bufqi->qdepth_no_thresh > bufqi->high_water_mark)
4610 bufqi->high_water_mark = bufqi->qdepth_no_thresh;
4611}
4612
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004613static QDF_STATUS ol_txrx_enqueue_rx_frames(
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304614 struct ol_txrx_peer_t *peer,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004615 struct ol_txrx_cached_bufq_t *bufqi,
4616 qdf_nbuf_t rx_buf_list)
4617{
4618 struct ol_rx_cached_buf *cache_buf;
4619 qdf_nbuf_t buf, next_buf;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004620 static uint32_t count;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004621
4622 if ((count++ & OL_TXRX_PRINT_RATE_LIMIT_THRESH) == 0)
4623 ol_txrx_info_high(
4624 "Data on the peer before it is registered bufq->curr %d bufq->drops %d",
4625 bufqi->curr, bufqi->dropped);
4626
4627 qdf_spin_lock_bh(&bufqi->bufq_lock);
4628 if (bufqi->curr >= bufqi->thresh) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304629 ol_txrx_drop_frames(bufqi, rx_buf_list);
4630 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4631 return QDF_STATUS_E_FAULT;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004632 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004633 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4634
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004635 buf = rx_buf_list;
4636 while (buf) {
4637 next_buf = qdf_nbuf_queue_next(buf);
4638 cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
4639 if (!cache_buf) {
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004640 qdf_nbuf_free(buf);
4641 } else {
4642 /* Add NULL terminator */
4643 qdf_nbuf_set_next(buf, NULL);
4644 cache_buf->buf = buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304645 if (peer && peer->valid) {
4646 qdf_spin_lock_bh(&bufqi->bufq_lock);
4647 list_add_tail(&cache_buf->list,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004648 &bufqi->cached_bufq);
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304649 bufqi->curr++;
4650 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4651 } else {
4652 qdf_mem_free(cache_buf);
4653 rx_buf_list = buf;
4654 qdf_nbuf_set_next(rx_buf_list, next_buf);
4655 qdf_spin_lock_bh(&bufqi->bufq_lock);
4656 ol_txrx_drop_frames(bufqi, rx_buf_list);
4657 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4658 return QDF_STATUS_E_FAULT;
4659 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004660 }
4661 buf = next_buf;
4662 }
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304663 return QDF_STATUS_SUCCESS;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004664}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004665/**
4666 * ol_rx_data_process() - process rx frame
4667 * @peer: peer
4668 * @rx_buf_list: rx buffer list
4669 *
4670 * Return: None
4671 */
4672void ol_rx_data_process(struct ol_txrx_peer_t *peer,
Nirav Shahcbc6d722016-03-01 16:24:53 +05304673 qdf_nbuf_t rx_buf_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004674{
Yun Parkeaea8632017-04-09 09:53:45 -07004675 /*
 4676	 * Firmware data path active responses are delivered via the shim RX
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004677	 * thread; the T2H message runs in SIRQ context, and IPA kernel
Yun Parkeaea8632017-04-09 09:53:45 -07004678	 * module APIs must not be called from SIRQ context.
4679 */
Dhanashri Atre182b0272016-02-17 15:35:07 -08004680 ol_txrx_rx_fp data_rx = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304681 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004682
4683 if ((!peer) || (!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304684 ol_txrx_err("peer/pdev is NULL");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004685 goto drop_rx_buf;
4686 }
4687
Dhanashri Atre182b0272016-02-17 15:35:07 -08004688 qdf_assert(peer->vdev);
4689
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304690 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004691 if (peer->state >= OL_TXRX_PEER_STATE_CONN)
Dhanashri Atre182b0272016-02-17 15:35:07 -08004692 data_rx = peer->vdev->rx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304693 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004694
4695 /*
4696 * If there is a data frame from peer before the peer is
4697 * registered for data service, enqueue them on to pending queue
4698 * which will be flushed to HDD once that station is registered.
4699 */
4700 if (!data_rx) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304701 if (ol_txrx_enqueue_rx_frames(peer, &peer->bufq_info,
4702 rx_buf_list)
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004703 != QDF_STATUS_SUCCESS)
Poddar, Siddarth07eebf32017-04-19 12:40:26 +05304704 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4705 "%s: failed to enqueue rx frm to cached_bufq",
4706 __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004707 } else {
4708#ifdef QCA_CONFIG_SMP
4709 /*
4710 * If the kernel is SMP, schedule rx thread to
4711 * better use multicores.
4712 */
4713 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
Nirav Shah36a87bf2016-02-22 12:38:46 +05304714 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004715 } else {
4716 p_cds_sched_context sched_ctx =
4717 get_cds_sched_ctxt();
4718 struct cds_ol_rx_pkt *pkt;
4719
4720 if (unlikely(!sched_ctx))
4721 goto drop_rx_buf;
4722
4723 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
Alok Kumar3a6327d2018-08-06 17:28:25 +05304724 if (!pkt)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004725 goto drop_rx_buf;
Alok Kumar3a6327d2018-08-06 17:28:25 +05304726
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004727 pkt->callback = (cds_ol_rx_thread_cb)
4728 ol_rx_data_cb;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304729 pkt->context = (void *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004730 pkt->Rxpkt = (void *)rx_buf_list;
4731 pkt->staId = peer->local_id;
4732 cds_indicate_rxpkt(sched_ctx, pkt);
4733 }
4734#else /* QCA_CONFIG_SMP */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304735 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004736#endif /* QCA_CONFIG_SMP */
4737 }
4738
4739 return;
4740
4741drop_rx_buf:
Alok Kumar3a6327d2018-08-06 17:28:25 +05304742 ol_txrx_drop_nbuf_list(rx_buf_list);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004743}
4744
4745/**
4746 * ol_txrx_register_peer() - register peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004747 * @sta_desc: sta descriptor
4748 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05304749 * Return: QDF Status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004750 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004751static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004752{
4753 struct ol_txrx_peer_t *peer;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304754 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004755 union ol_txrx_peer_update_param_t param;
4756 struct privacy_exemption privacy_filter;
4757
4758 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304759 ol_txrx_err("Pdev is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304760 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004761 }
4762
4763 if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304764 ol_txrx_err("Invalid sta id :%d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004765 sta_desc->sta_id);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304766 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004767 }
4768
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004769 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
4770 sta_desc->sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004771 if (!peer)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304772 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004773
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304774 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004775 peer->state = OL_TXRX_PEER_STATE_CONN;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304776 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004777
4778 param.qos_capable = sta_desc->is_qos_enabled;
4779 ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
4780 ol_txrx_peer_update_qos_capable);
4781
4782 if (sta_desc->is_wapi_supported) {
4783 /*Privacy filter to accept unencrypted WAI frames */
4784 privacy_filter.ether_type = ETHERTYPE_WAI;
4785 privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
4786 privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
4787 ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
4788 }
4789
4790 ol_txrx_flush_rx_frames(peer, 0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304791 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004792}
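
/*
 * Illustrative usage sketch, not part of the driver: how an upper layer
 * might register a newly associated station through the CDP wrapper
 * ol_txrx_wrapper_register_peer() defined later in this file. The my_pdev,
 * sta_id, qos and wapi names are hypothetical; only the descriptor fields
 * actually consumed by ol_txrx_register_peer() are set. The sta_id must be
 * below WLAN_MAX_STA_COUNT, is_qos_enabled drives the qos_capable peer
 * update and is_wapi_supported installs the WAI privacy filter, exactly as
 * handled above.
 *
 *    struct ol_txrx_desc_type sta_desc = { 0 };
 *
 *    sta_desc.sta_id = sta_id;
 *    sta_desc.is_qos_enabled = qos;
 *    sta_desc.is_wapi_supported = wapi;
 *
 *    if (ol_txrx_wrapper_register_peer(my_pdev, &sta_desc) !=
 *        QDF_STATUS_SUCCESS)
 *        return QDF_STATUS_E_FAILURE;
 */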
4793
4794/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004795 * ol_txrx_register_ocb_peer - Function to register the OCB peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004796 * @mac_addr: MAC address of the self peer
4797 * @peer_id: Pointer to the peer ID
4798 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304799 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004800 */
Jeff Johnson382bce02017-09-01 14:21:07 -07004801static QDF_STATUS ol_txrx_register_ocb_peer(uint8_t *mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004802 uint8_t *peer_id)
4803{
4804 ol_txrx_pdev_handle pdev;
4805 ol_txrx_peer_handle peer;
4806
Anurag Chouhan6d760662016-02-20 16:05:43 +05304807 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004808 if (!pdev) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05304809 ol_txrx_err("Unable to find pdev!");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304810 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004811 }
4812
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004813 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
4814 mac_addr, peer_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004815 if (!peer) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05304816 ol_txrx_err("Unable to find OCB peer!");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304817 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004818 }
4819
4820 ol_txrx_set_ocb_peer(pdev, peer);
4821
4822 /* Set peer state to connected */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004823 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004824 OL_TXRX_PEER_STATE_AUTH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004825
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304826 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004827}
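
/*
 * Illustrative sketch, with hypothetical names: after the OCB self peer has
 * been created, the control path registers it through the .register_ocb_peer
 * entry of ol_ops_peer (wired up near the end of this file) and keeps the
 * peer id filled in by the address lookup. self_mac is an assumption here.
 *
 *    uint8_t peer_id;
 *
 *    if (ol_txrx_register_ocb_peer(self_mac, &peer_id) != QDF_STATUS_SUCCESS)
 *        return QDF_STATUS_E_FAILURE;
 */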
4828
4829/**
4830 * ol_txrx_set_ocb_peer - Function to store the OCB peer
4831 * @pdev: Handle to the HTT instance
4832 * @peer: Pointer to the peer
4833 */
4834void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
4835 struct ol_txrx_peer_t *peer)
4836{
4837 if (pdev == NULL)
4838 return;
4839
4840 pdev->ocb_peer = peer;
4841 pdev->ocb_peer_valid = (NULL != peer);
4842}
4843
4844/**
4845 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
4846 * @pdev: Handle to the HTT instance
4847 * @peer: Pointer to the returned peer
4848 *
4849 * Return: true if the peer is valid, false if not
4850 */
4851bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
4852 struct ol_txrx_peer_t **peer)
4853{
4854 int rc;
4855
4856 if ((pdev == NULL) || (peer == NULL)) {
4857 rc = false;
4858 goto exit;
4859 }
4860
4861 if (pdev->ocb_peer_valid) {
4862 *peer = pdev->ocb_peer;
4863 rc = true;
4864 } else {
4865 rc = false;
4866 }
4867
4868exit:
4869 return rc;
4870}
4871
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07004872#ifdef RECEIVE_OFFLOAD
4873/**
4874 * ol_txrx_offld_flush_handler() - offld flush handler
4875 * @context: dev handle
4876 * @rxpkt: rx data
4877 * @staid: station id
4878 *
4879 * This function handles an offld flush indication.
4880 * If the rx thread is enabled, it will be invoked by the rx
4881 * thread else it will be called in the tasklet context
4882 *
4883 * Return: none
4884 */
4885static void ol_txrx_offld_flush_handler(void *context,
4886 void *rxpkt,
4887 uint16_t staid)
4888{
4889 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
4890
4891 if (qdf_unlikely(!pdev)) {
4892 ol_txrx_err("Invalid context");
4893 qdf_assert(0);
4894 return;
4895 }
4896
4897 if (pdev->offld_flush_cb)
4898 pdev->offld_flush_cb(context);
4899 else
4900 ol_txrx_err("offld_flush_cb NULL");
4901}
4902
4903/**
4904 * ol_txrx_offld_flush() - offld flush callback
4905 * @data: opaque data pointer
4906 *
4907 * This is the callback registered with CE to trigger
4908 * an offld flush
4909 *
4910 * Return: none
4911 */
4912static void ol_txrx_offld_flush(void *data)
4913{
4914 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
4915 struct cds_ol_rx_pkt *pkt;
4916 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
4917
4918 if (qdf_unlikely(!sched_ctx))
4919 return;
4920
Amar Singhal4e855ad2018-09-04 12:19:00 -07004921 if (qdf_unlikely(!pdev)) {
4922 ol_txrx_err("TXRX module context is NULL");
4923 return;
4924 }
4925
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07004926 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
4927 ol_txrx_offld_flush_handler(data, NULL, 0);
4928 } else {
4929 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
Alok Kumar3a6327d2018-08-06 17:28:25 +05304930 if (qdf_unlikely(!pkt))
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07004931 return;
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07004932
4933 pkt->callback = ol_txrx_offld_flush_handler;
4934 pkt->context = data;
4935 pkt->Rxpkt = NULL;
4936 pkt->staId = 0;
4937 cds_indicate_rxpkt(sched_ctx, pkt);
4938 }
4939}
4940
4941/**
4942 * ol_register_offld_flush_cb() - register the offld flush callback
4943 * @offld_flush_cb: flush callback function
4945 *
4946 * Store the offld flush callback provided and in turn
4947 * register OL's offld flush handler with CE
4948 *
4949 * Return: none
4950 */
4951static void ol_register_offld_flush_cb(void (offld_flush_cb)(void *))
4952{
4953 struct hif_opaque_softc *hif_device;
4954 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
4955
4956 if (pdev == NULL) {
4957 ol_txrx_err("pdev NULL!");
4958 TXRX_ASSERT2(0);
4959 goto out;
4960 }
4961 if (pdev->offld_flush_cb != NULL) {
4962 ol_txrx_info("offld already initialised");
4963 if (pdev->offld_flush_cb != offld_flush_cb) {
4964                  ol_txrx_err(
4965                          "offld_flush_cb differs from the previously registered callback");
4966                  TXRX_ASSERT2(0);
4967 goto out;
4968 }
4969 goto out;
4970 }
4971 pdev->offld_flush_cb = offld_flush_cb;
4972 hif_device = cds_get_context(QDF_MODULE_ID_HIF);
4973
4974 if (qdf_unlikely(hif_device == NULL)) {
4975 ol_txrx_err("hif_device NULL!");
4976 qdf_assert(0);
4977 goto out;
4978 }
4979
4980 hif_offld_flush_cb_register(hif_device, ol_txrx_offld_flush);
4981
4982out:
4983 return;
4984}
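
/*
 * Illustrative sketch, with hypothetical names: a receive-offload consumer
 * normally reaches this registration through the CDP rx_offld_ops table
 * (see ol_rx_offld_ops below) rather than calling the static function
 * directly. The soc handle and my_gro_flush() are assumptions.
 *
 *    static void my_gro_flush(void *context)
 *    {
 *        ... flush any per-context GRO/LRO aggregation state ...
 *    }
 *
 *    soc->ops->rx_offld_ops->register_rx_offld_flush_cb(my_gro_flush);
 *    ...
 *    soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb();
 */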
4985
4986/**
4987 * ol_deregister_offld_flush_cb() - deregister the offld flush callback
4988 *
4989 * Remove the offld flush callback provided and in turn
4990 * deregister OL's offld flush handler with CE
4991 *
4992 * Return: none
4993 */
4994static void ol_deregister_offld_flush_cb(void)
4995{
4996 struct hif_opaque_softc *hif_device;
4997 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
4998
4999 if (pdev == NULL) {
5000 ol_txrx_err("pdev NULL!");
5001 return;
5002 }
5003 hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5004
5005 if (qdf_unlikely(hif_device == NULL)) {
5006 ol_txrx_err("hif_device NULL!");
5007 qdf_assert(0);
5008 return;
5009 }
5010
5011 hif_offld_flush_cb_deregister(hif_device);
5012
5013 pdev->offld_flush_cb = NULL;
5014}
5015#endif /* RECEIVE_OFFLOAD */
5016
Poddar, Siddarth34872782017-08-10 14:08:51 +05305017/**
5018 * ol_register_data_stall_detect_cb() - register data stall callback
5019 * @data_stall_detect_callback: data stall callback function
 5020 *
5022 * Return: QDF_STATUS Enumeration
5023 */
5024static QDF_STATUS ol_register_data_stall_detect_cb(
5025 data_stall_detect_cb data_stall_detect_callback)
5026{
5027 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5028
5029 if (pdev == NULL) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05305030 ol_txrx_err("pdev NULL!");
Poddar, Siddarth34872782017-08-10 14:08:51 +05305031 return QDF_STATUS_E_INVAL;
5032 }
5033 pdev->data_stall_detect_callback = data_stall_detect_callback;
5034 return QDF_STATUS_SUCCESS;
5035}
5036
5037/**
5038 * ol_deregister_data_stall_detect_cb() - de-register data stall callback
5039 * @data_stall_detect_callback: data stall callback function
 5040 *
5042 * Return: QDF_STATUS Enumeration
5043 */
5044static QDF_STATUS ol_deregister_data_stall_detect_cb(
5045 data_stall_detect_cb data_stall_detect_callback)
5046{
5047 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5048
5049 if (pdev == NULL) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05305050 ol_txrx_err("pdev NULL!");
Poddar, Siddarth34872782017-08-10 14:08:51 +05305051 return QDF_STATUS_E_INVAL;
5052 }
5053 pdev->data_stall_detect_callback = NULL;
5054 return QDF_STATUS_SUCCESS;
5055}
5056
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305057/**
5058 * ol_txrx_post_data_stall_event() - post data stall event
5059 * @indicator: Module triggering data stall
5060 * @data_stall_type: data stall event type
5061 * @pdev_id: pdev id
5062 * @vdev_id_bitmap: vdev id bitmap
5063 * @recovery_type: data stall recovery type
5064 *
5065 * Return: None
5066 */
5067static void ol_txrx_post_data_stall_event(
5068 enum data_stall_log_event_indicator indicator,
5069 enum data_stall_log_event_type data_stall_type,
5070 uint32_t pdev_id, uint32_t vdev_id_bitmap,
5071 enum data_stall_log_recovery_type recovery_type)
5072{
5073 struct scheduler_msg msg = {0};
5074 QDF_STATUS status;
5075 struct data_stall_event_info *data_stall_info;
5076 ol_txrx_pdev_handle pdev;
5077
5078 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5079 if (!pdev) {
5080 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5081 "%s: pdev is NULL.", __func__);
5082 return;
5083 }
5084 data_stall_info = qdf_mem_malloc(sizeof(*data_stall_info));
Nirav Shah7c8c1712018-09-10 16:01:31 +05305085 if (!data_stall_info)
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305086 return;
Nirav Shah7c8c1712018-09-10 16:01:31 +05305087
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305088 data_stall_info->indicator = indicator;
5089 data_stall_info->data_stall_type = data_stall_type;
5090 data_stall_info->vdev_id_bitmap = vdev_id_bitmap;
5091 data_stall_info->pdev_id = pdev_id;
5092 data_stall_info->recovery_type = recovery_type;
5093
Poddar, Siddarthb9047592017-10-05 15:48:28 +05305094 if (data_stall_info->data_stall_type ==
5095 DATA_STALL_LOG_FW_RX_REFILL_FAILED)
5096 htt_log_rx_ring_info(pdev->htt_pdev);
5097
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305098 sys_build_message_header(SYS_MSG_ID_DATA_STALL_MSG, &msg);
5099 /* Save callback and data */
5100 msg.callback = pdev->data_stall_detect_callback;
5101 msg.bodyptr = data_stall_info;
5102 msg.bodyval = 0;
5103
gaurank kathpalia9fb3f4b2018-08-28 20:19:48 +05305104 status = scheduler_post_message(QDF_MODULE_ID_TXRX,
5105 QDF_MODULE_ID_HDD,
5106 QDF_MODULE_ID_SYS, &msg);
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305107
5108 if (status != QDF_STATUS_SUCCESS) {
5109 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5110 "%s: failed to post data stall msg to SYS", __func__);
5111 qdf_mem_free(data_stall_info);
5112 }
5113}
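
/*
 * Illustrative sketch, with hypothetical values: a stall detector could
 * report a firmware rx-refill failure like this (external callers go through
 * the .txrx_post_data_stall_event entry of ol_ops_misc). Only
 * DATA_STALL_LOG_FW_RX_REFILL_FAILED is referenced elsewhere in this file;
 * the indicator and recovery_type values depend on the data_stall_log_*
 * enums in the driver headers and are left abstract here.
 *
 *    enum data_stall_log_event_indicator ind = <detecting module>;
 *    enum data_stall_log_recovery_type rec = <requested recovery>;
 *
 *    ol_txrx_post_data_stall_event(ind, DATA_STALL_LOG_FW_RX_REFILL_FAILED,
 *                                  pdev_id, 1 << vdev_id, rec);
 */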
5114
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305115void
5116ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
5117{
Nirav Shah7c8c1712018-09-10 16:01:31 +05305118 qdf_print(" Pkt: VA 0x%pK PA 0x%llx len %d\n",
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305119 qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
5120 print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
5121 qdf_nbuf_data(nbuf), len, true);
5122}
5123
Dhanashri Atre12a08392016-02-17 13:10:34 -08005124/**
5125 * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
5126 * @vdev_id: vdev_id
5127 *
5128 * Return: vdev handle
5129 * NULL if not found.
5130 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005131struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
Dhanashri Atre12a08392016-02-17 13:10:34 -08005132{
5133 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5134 ol_txrx_vdev_handle vdev = NULL;
5135
5136 if (qdf_unlikely(!pdev))
5137 return NULL;
5138
5139 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5140 if (vdev->vdev_id == vdev_id)
5141 break;
5142 }
5143
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005144 return (struct cdp_vdev *)vdev;
Dhanashri Atre12a08392016-02-17 13:10:34 -08005145}
Nirav Shah2e583a02016-04-30 14:06:12 +05305146
5147/**
5148 * ol_txrx_set_wisa_mode() - set wisa mode
5149 * @vdev: vdev handle
5150 * @enable: enable flag
5151 *
5152 * Return: QDF STATUS
5153 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005154static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_vdev *pvdev, bool enable)
Nirav Shah2e583a02016-04-30 14:06:12 +05305155{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005156 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Leo Chang98726762016-10-28 11:07:18 -07005157
Nirav Shah2e583a02016-04-30 14:06:12 +05305158 if (!vdev)
5159 return QDF_STATUS_E_INVAL;
5160
5161 vdev->is_wisa_mode_enable = enable;
5162 return QDF_STATUS_SUCCESS;
5163}
Leo Chang98726762016-10-28 11:07:18 -07005164
5165/**
5166 * ol_txrx_get_vdev_id() - get interface id from interface context
5167 * @pvdev: vdev handle
5168 *
5169 * Return: virtual interface id
5170 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005171static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
Leo Chang98726762016-10-28 11:07:18 -07005172{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005173 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07005174
Leo Chang98726762016-10-28 11:07:18 -07005175 return vdev->vdev_id;
5176}
5177
5178/**
Leo Chang98726762016-10-28 11:07:18 -07005179 * ol_txrx_soc_attach_target() - attach soc target
5180 * @soc: soc handle
5181 *
 5182 * MCL legacy OL does nothing here
5183 *
 5184 * Return: QDF_STATUS_SUCCESS
5185 */
Venkata Sharath Chandra Manchala598f5032018-09-05 18:55:43 -07005186static QDF_STATUS ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
Leo Chang98726762016-10-28 11:07:18 -07005187{
 5188     /* MCL legacy OL does nothing here */
Venkata Sharath Chandra Manchala598f5032018-09-05 18:55:43 -07005189 return QDF_STATUS_SUCCESS;
Leo Chang98726762016-10-28 11:07:18 -07005190}
5191
5192/**
5193 * ol_txrx_soc_detach() - detach soc target
5194 * @soc: soc handle
5195 *
 5196 * MCL legacy OL only frees the soc context here
5197 *
 5198 * Return: none
5199 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005200static void ol_txrx_soc_detach(void *soc)
Leo Chang98726762016-10-28 11:07:18 -07005201{
Venkata Sharath Chandra Manchala0c2eece2017-03-09 17:30:52 -08005202 qdf_mem_free(soc);
Leo Chang98726762016-10-28 11:07:18 -07005203}
5204
5205/**
5206 * ol_txrx_pkt_log_con_service() - connect packet log service
5207 * @ppdev: physical device handle
5208 * @scn: device context
5209 *
 5210 * Return: none
5211 */
Nirav Shahbb8e47c2018-05-17 16:56:41 +05305212#ifdef REMOVE_PKT_LOG
5213static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
5214{
5215}
5216#else
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005217static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
Leo Chang98726762016-10-28 11:07:18 -07005218{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005219 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07005220
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005221 htt_pkt_log_init((struct cdp_pdev *)pdev, scn);
Leo Chang98726762016-10-28 11:07:18 -07005222 pktlog_htc_attach();
5223}
Nirav Shahbb8e47c2018-05-17 16:56:41 +05305224#endif
Leo Chang98726762016-10-28 11:07:18 -07005225
5226/* OL wrapper functions for CDP abstraction */
5227/**
5228 * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
5229 * @peer: peer handle
5230 * @drop: rx packets drop or deliver
5231 *
5232 * Return: none
5233 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005234static void ol_txrx_wrapper_flush_rx_frames(void *peer, bool drop)
Leo Chang98726762016-10-28 11:07:18 -07005235{
5236 ol_txrx_flush_rx_frames((ol_txrx_peer_handle)peer, drop);
5237}
5238
5239/**
5240 * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
5241 * @ppdev: pdev handle
5242 * @vdev_id: interface id
5243 *
5244 * Return: virtual interface instance
5245 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005246static
5247struct cdp_vdev *ol_txrx_wrapper_get_vdev_from_vdev_id(struct cdp_pdev *ppdev,
5248 uint8_t vdev_id)
Leo Chang98726762016-10-28 11:07:18 -07005249{
5250 return ol_txrx_get_vdev_from_vdev_id(vdev_id);
5251}
5252
5253/**
5254 * ol_txrx_wrapper_register_peer() - register peer
5255 * @pdev: pdev handle
5256 * @sta_desc: peer description
5257 *
5258 * Return: QDF STATUS
5259 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005260static QDF_STATUS ol_txrx_wrapper_register_peer(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005261 struct ol_txrx_desc_type *sta_desc)
5262{
5263 return ol_txrx_register_peer(sta_desc);
5264}
5265
5266/**
5267 * ol_txrx_wrapper_peer_find_by_local_id() - Find a txrx peer handle
5268 * @pdev - the data physical device object
5269 * @local_peer_id - the ID txrx assigned locally to the peer in question
5270 *
5271 * The control SW typically uses the txrx peer handle to refer to the peer.
 5272 * In unusual circumstances, if it is infeasible for the control SW to maintain
 5273 * the txrx peer handle but it can maintain a small integer local peer ID,
 5274 * this function allows the peer handle to be retrieved, based on the local
5275 * peer ID.
5276 *
5277 * @return handle to the txrx peer object
5278 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005279static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005280ol_txrx_wrapper_peer_find_by_local_id(struct cdp_pdev *pdev,
5281 uint8_t local_peer_id)
Leo Chang98726762016-10-28 11:07:18 -07005282{
5283 return (void *)ol_txrx_peer_find_by_local_id(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005284 pdev, local_peer_id);
Leo Chang98726762016-10-28 11:07:18 -07005285}
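
/*
 * Illustrative sketch, with hypothetical names: translating a stored local
 * peer ID back into a peer handle through the wrapper above. Note that this
 * lookup does not take a reference; see the *_peer_get_ref_* wrapper below
 * for the reference-counted variant.
 *
 *    struct ol_txrx_peer_t *peer;
 *
 *    peer = ol_txrx_wrapper_peer_find_by_local_id(my_pdev, local_id);
 *    if (!peer)
 *        return QDF_STATUS_E_FAULT;
 */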
5286
5287/**
5288 * ol_txrx_wrapper_cfg_is_high_latency() - device is high or low latency device
5289 * @pdev: pdev handle
5290 *
5291 * Return: 1 high latency bus
5292 * 0 low latency bus
5293 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005294static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
Leo Chang98726762016-10-28 11:07:18 -07005295{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005296 return ol_cfg_is_high_latency(cfg_pdev);
Leo Chang98726762016-10-28 11:07:18 -07005297}
5298
5299/**
5300 * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
 5301 * @pdev - data pdev handle
 5302 * @peer_mac - MAC address of the peer whose state has changed
 5303 * @state - the new state of the peer
5303 *
5304 * Specify the peer's authentication state (none, connected, authenticated)
5305 * to allow the data SW to determine whether to filter out invalid data frames.
5306 * (In the "connected" state, where security is enabled, but authentication
5307 * has not completed, tx and rx data frames other than EAPOL or WAPI should
5308 * be discarded.)
5309 * This function is only relevant for systems in which the tx and rx filtering
5310 * are done in the host rather than in the target.
5311 *
5312 * Return: QDF Status
5313 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005314static QDF_STATUS ol_txrx_wrapper_peer_state_update(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005315 uint8_t *peer_mac, enum ol_txrx_peer_state state)
5316{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005317 return ol_txrx_peer_state_update(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005318 peer_mac, state);
5319}
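
/*
 * Illustrative sketch, with hypothetical names: once the key exchange for a
 * connected peer completes, the control path promotes it so that data frames
 * other than EAPOL/WAPI are no longer filtered. The state values are the
 * ones used elsewhere in this file; my_pdev and peer_mac are assumptions.
 *
 *    ol_txrx_wrapper_peer_state_update(my_pdev, peer_mac,
 *                                      OL_TXRX_PEER_STATE_CONN);
 *    ... 802.1X / WAPI handshake completes ...
 *    ol_txrx_wrapper_peer_state_update(my_pdev, peer_mac,
 *                                      OL_TXRX_PEER_STATE_AUTH);
 */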
5320
5321/**
5322 * ol_txrx_wrapper_find_peer_by_addr() - find peer instance by address
5323 * @pdev: pdev handle
Jeff Johnson37df7c32018-05-10 12:30:35 -07005324 * @peer_addr: peer address we want to find
Leo Chang98726762016-10-28 11:07:18 -07005325 * @peer_id: peer id
5326 *
5327 * Return: peer instance pointer
5328 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005329static void *ol_txrx_wrapper_find_peer_by_addr(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005330 uint8_t *peer_addr, uint8_t *peer_id)
5331{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005332 return ol_txrx_find_peer_by_addr(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005333 peer_addr, peer_id);
5334}
5335
5336/**
Mohit Khannab7bec722017-11-10 11:43:44 -08005337 * ol_txrx_wrapper_peer_get_ref_by_addr() - get peer reference by address
5338 * @pdev: pdev handle
5339 * @peer_addr: peer address we want to find
5340 * @peer_id: peer id
5341 * @debug_id: peer debug id for tracking
5342 *
5343 * Return: peer instance pointer
5344 */
5345static void *
5346ol_txrx_wrapper_peer_get_ref_by_addr(struct cdp_pdev *pdev,
5347 u8 *peer_addr, uint8_t *peer_id,
5348 enum peer_debug_id_type debug_id)
5349{
5350 return ol_txrx_peer_get_ref_by_addr((ol_txrx_pdev_handle)pdev,
5351 peer_addr, peer_id, debug_id);
5352}
5353
5354/**
5355 * ol_txrx_wrapper_peer_release_ref() - release peer reference
5356 * @peer: peer handle
5357 * @debug_id: peer debug id for tracking
5358 *
5359 * Release peer ref acquired by peer get ref api
5360 *
5361 * Return: void
5362 */
5363static void ol_txrx_wrapper_peer_release_ref(void *peer,
5364 enum peer_debug_id_type debug_id)
5365{
5366 ol_txrx_peer_release_ref(peer, debug_id);
5367}
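
/*
 * Illustrative sketch, with hypothetical names: the get-ref/release-ref pair
 * above is meant to bracket any use of a peer outside the txrx locks, so the
 * peer cannot be freed while it is in use. my_pdev, mac and my_debug_id are
 * assumptions.
 *
 *    uint8_t peer_id;
 *    void *peer;
 *
 *    peer = ol_txrx_wrapper_peer_get_ref_by_addr(my_pdev, mac, &peer_id,
 *                                                my_debug_id);
 *    if (!peer)
 *        return;
 *    ... use the peer; the reference keeps it alive ...
 *    ol_txrx_wrapper_peer_release_ref(peer, my_debug_id);
 */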
5368
5369/**
Leo Chang98726762016-10-28 11:07:18 -07005370 * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
5371 * @cfg_ctx: cfg context
5372 * @cfg_param: cfg parameters
5373 *
5374 * Return: none
5375 */
Jeff Johnsonffa9afc2016-12-19 15:34:41 -08005376static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005377ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
5378 void *cfg_param)
Leo Chang98726762016-10-28 11:07:18 -07005379{
5380 return ol_tx_set_flow_control_parameters(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005381 cfg_pdev,
Leo Chang98726762016-10-28 11:07:18 -07005382 (struct txrx_pdev_cfg_param_t *)cfg_param);
5383}
5384
jitiphil377bcc12018-10-05 19:46:08 +05305385/**
 5386 * ol_txrx_get_cfg() - get ini/cfg values in legacy DP
 5387 * @soc: soc context
 5388 * @cfg: cfg item to query
 5389 *
 5390 * Return: requested cfg value
5391 */
5392static uint32_t ol_txrx_get_cfg(void *soc, enum cdp_dp_cfg cfg)
5393{
5394 struct txrx_pdev_cfg_t *cfg_ctx;
5395 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5396 uint32_t value = 0;
5397
5398 cfg_ctx = (struct txrx_pdev_cfg_t *)(pdev->ctrl_pdev);
5399 switch (cfg) {
5400 case cfg_dp_enable_data_stall:
5401 value = cfg_ctx->enable_data_stall_detection;
5402 break;
5403 case cfg_dp_enable_ip_tcp_udp_checksum_offload:
5404 value = cfg_ctx->ip_tcp_udp_checksum_offload;
5405 break;
5406 case cfg_dp_tso_enable:
5407 value = cfg_ctx->tso_enable;
5408 break;
5409 case cfg_dp_lro_enable:
5410 value = cfg_ctx->lro_enable;
5411 break;
5412 case cfg_dp_gro_enable:
5413 value = cfg_ctx->gro_enable;
5414 break;
5415#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5416 case cfg_dp_tx_flow_start_queue_offset:
5417 value = cfg_ctx->tx_flow_start_queue_offset;
5418 break;
5419 case cfg_dp_tx_flow_stop_queue_threshold:
5420 value = cfg_ctx->tx_flow_stop_queue_th;
5421 break;
5422#endif
5423 case cfg_dp_ipa_uc_tx_buf_size:
5424 value = cfg_ctx->uc_tx_buffer_size;
5425 break;
5426 case cfg_dp_ipa_uc_tx_partition_base:
5427 value = cfg_ctx->uc_tx_partition_base;
5428 break;
5429 case cfg_dp_ipa_uc_rx_ind_ring_count:
5430 value = cfg_ctx->uc_rx_indication_ring_count;
5431 break;
5432 case cfg_dp_enable_flow_steering:
5433 value = cfg_ctx->enable_flow_steering;
5434 break;
5435 case cfg_dp_reorder_offload_supported:
5436 value = cfg_ctx->is_full_reorder_offload;
5437 break;
5438 case cfg_dp_ce_classify_enable:
5439 value = cfg_ctx->ce_classify_enabled;
5440 break;
5441 case cfg_dp_disable_intra_bss_fwd:
5442 value = cfg_ctx->disable_intra_bss_fwd;
5443 break;
5444 default:
5445 value = 0;
5446 break;
5447 }
5448
5449 return value;
5450}
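
/*
 * Illustrative sketch, with an assumed soc handle: components query these
 * ini/cfg values through the common .txrx_get_cfg op rather than touching
 * struct txrx_pdev_cfg_t directly; the enum value is one of the cases
 * handled above.
 *
 *    uint32_t gro_enabled = ol_txrx_get_cfg(soc, cfg_dp_gro_enable);
 */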
5451
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08005452#ifdef WDI_EVENT_ENABLE
5453void *ol_get_pldev(struct cdp_pdev *txrx_pdev)
5454{
5455 struct ol_txrx_pdev_t *pdev =
5456 (struct ol_txrx_pdev_t *)txrx_pdev;
5457 if (pdev != NULL)
5458 return pdev->pl_dev;
5459
5460 return NULL;
5461}
5462#endif
5463
Leo Chang98726762016-10-28 11:07:18 -07005464static struct cdp_cmn_ops ol_ops_cmn = {
5465 .txrx_soc_attach_target = ol_txrx_soc_attach_target,
5466 .txrx_vdev_attach = ol_txrx_vdev_attach,
5467 .txrx_vdev_detach = ol_txrx_vdev_detach,
5468 .txrx_pdev_attach = ol_txrx_pdev_attach,
5469 .txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
5470 .txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05305471 .txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
Leo Chang98726762016-10-28 11:07:18 -07005472 .txrx_pdev_detach = ol_txrx_pdev_detach,
Dhanashri Atre272fd232016-11-10 16:20:46 -08005473 .txrx_peer_create = ol_txrx_peer_attach,
5474 .txrx_peer_setup = NULL,
5475 .txrx_peer_teardown = NULL,
5476 .txrx_peer_delete = ol_txrx_peer_detach,
Alok Kumare1977442018-11-28 17:16:03 +05305477 .txrx_peer_delete_sync = ol_txrx_peer_detach_sync,
Leo Chang98726762016-10-28 11:07:18 -07005478 .txrx_vdev_register = ol_txrx_vdev_register,
5479 .txrx_soc_detach = ol_txrx_soc_detach,
5480 .txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
5481 .txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
5482 .txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
Krishna Kumaar Natarajan5fb9ac12016-12-06 14:28:35 -08005483 .txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
Leo Chang98726762016-10-28 11:07:18 -07005484 .txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
5485 .txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
5486 .txrx_get_tx_pending = ol_txrx_get_tx_pending,
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07005487 .flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005488 .txrx_fw_stats_get = ol_txrx_fw_stats_get,
5489 .display_stats = ol_txrx_display_stats,
jitiphil377bcc12018-10-05 19:46:08 +05305490 .txrx_get_cfg = ol_txrx_get_cfg,
Leo Chang98726762016-10-28 11:07:18 -07005491 /* TODO: Add other functions */
5492};
5493
5494static struct cdp_misc_ops ol_ops_misc = {
5495 .set_ibss_vdev_heart_beat_timer =
5496 ol_txrx_set_ibss_vdev_heart_beat_timer,
5497#ifdef CONFIG_HL_SUPPORT
5498 .set_wmm_param = ol_txrx_set_wmm_param,
5499#endif /* CONFIG_HL_SUPPORT */
5500 .bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
5501 .bad_peer_txctl_update_threshold =
5502 ol_txrx_bad_peer_txctl_update_threshold,
5503 .hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
5504 .tx_non_std = ol_tx_non_std,
5505 .get_vdev_id = ol_txrx_get_vdev_id,
Alok Kumar75355aa2018-03-19 17:32:58 +05305506 .get_tx_ack_stats = ol_txrx_get_tx_ack_stats,
Leo Chang98726762016-10-28 11:07:18 -07005507 .set_wisa_mode = ol_txrx_set_wisa_mode,
Poddar, Siddarth34872782017-08-10 14:08:51 +05305508 .txrx_data_stall_cb_register = ol_register_data_stall_detect_cb,
5509 .txrx_data_stall_cb_deregister = ol_deregister_data_stall_detect_cb,
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305510 .txrx_post_data_stall_event = ol_txrx_post_data_stall_event,
Leo Chang98726762016-10-28 11:07:18 -07005511#ifdef FEATURE_RUNTIME_PM
5512 .runtime_suspend = ol_txrx_runtime_suspend,
5513 .runtime_resume = ol_txrx_runtime_resume,
5514#endif /* FEATURE_RUNTIME_PM */
5515 .get_opmode = ol_txrx_get_opmode,
5516 .mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
5517 .update_mac_id = ol_txrx_update_mac_id,
5518 .flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
5519 .get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
5520 .pkt_log_init = htt_pkt_log_init,
5521 .pkt_log_con_service = ol_txrx_pkt_log_con_service
5522};
5523
5524static struct cdp_flowctl_ops ol_ops_flowctl = {
5525#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5526 .register_pause_cb = ol_txrx_register_pause_cb,
5527 .set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07005528 .dump_flow_pool_info = ol_tx_dump_flow_pool_info,
Leo Chang98726762016-10-28 11:07:18 -07005529#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
5530};
5531
Ajit Pal Singh5d269612018-04-19 16:29:12 +05305532#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
Leo Chang98726762016-10-28 11:07:18 -07005533static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
Leo Chang98726762016-10-28 11:07:18 -07005534 .register_tx_flow_control = ol_txrx_register_tx_flow_control,
5535 .deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
5536 .flow_control_cb = ol_txrx_flow_control_cb,
5537 .get_tx_resource = ol_txrx_get_tx_resource,
5538 .ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
5539 .vdev_flush = ol_txrx_vdev_flush,
5540 .vdev_pause = ol_txrx_vdev_pause,
5541 .vdev_unpause = ol_txrx_vdev_unpause
Ajit Pal Singh5d269612018-04-19 16:29:12 +05305542}; /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
5543#elif defined(QCA_HL_NETDEV_FLOW_CONTROL)
5544static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
5545 .register_tx_flow_control = ol_txrx_register_hl_flow_control,
5546 .vdev_flush = ol_txrx_vdev_flush,
5547 .vdev_pause = ol_txrx_vdev_pause,
Ajit Pal Singh851a7772018-05-14 16:55:09 +05305548 .vdev_unpause = ol_txrx_vdev_unpause,
Ajit Pal Singhd6c08f22018-04-25 16:55:26 +05305549 .set_vdev_os_queue_status = ol_txrx_set_vdev_os_queue_status,
5550 .set_vdev_tx_desc_limit = ol_txrx_set_vdev_tx_desc_limit
Leo Chang98726762016-10-28 11:07:18 -07005551};
Ajit Pal Singh5d269612018-04-19 16:29:12 +05305552#else /* QCA_HL_NETDEV_FLOW_CONTROL */
5553static struct cdp_lflowctl_ops ol_ops_l_flowctl = { };
5554#endif
Leo Chang98726762016-10-28 11:07:18 -07005555
Leo Chang98726762016-10-28 11:07:18 -07005556#ifdef IPA_OFFLOAD
Yun Parkb4f591d2017-03-29 15:51:01 -07005557static struct cdp_ipa_ops ol_ops_ipa = {
Leo Chang98726762016-10-28 11:07:18 -07005558 .ipa_get_resource = ol_txrx_ipa_uc_get_resource,
5559 .ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
5560 .ipa_set_active = ol_txrx_ipa_uc_set_active,
5561 .ipa_op_response = ol_txrx_ipa_uc_op_response,
5562 .ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
5563 .ipa_get_stat = ol_txrx_ipa_uc_get_stat,
5564 .ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
Yun Park637d6482016-10-05 10:51:33 -07005565 .ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base,
Yun Parkb4f591d2017-03-29 15:51:01 -07005566 .ipa_enable_autonomy = ol_txrx_ipa_enable_autonomy,
5567 .ipa_disable_autonomy = ol_txrx_ipa_disable_autonomy,
5568 .ipa_setup = ol_txrx_ipa_setup,
5569 .ipa_cleanup = ol_txrx_ipa_cleanup,
5570 .ipa_setup_iface = ol_txrx_ipa_setup_iface,
5571 .ipa_cleanup_iface = ol_txrx_ipa_cleanup_iface,
5572 .ipa_enable_pipes = ol_txrx_ipa_enable_pipes,
5573 .ipa_disable_pipes = ol_txrx_ipa_disable_pipes,
5574 .ipa_set_perf_level = ol_txrx_ipa_set_perf_level,
5575#ifdef FEATURE_METERING
Yun Park637d6482016-10-05 10:51:33 -07005576 .ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
5577 .ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota
Yun Parkb4f591d2017-03-29 15:51:01 -07005578#endif
Leo Chang98726762016-10-28 11:07:18 -07005579};
Yun Parkb4f591d2017-03-29 15:51:01 -07005580#endif
Leo Chang98726762016-10-28 11:07:18 -07005581
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005582#ifdef RECEIVE_OFFLOAD
5583static struct cdp_rx_offld_ops ol_rx_offld_ops = {
5584 .register_rx_offld_flush_cb = ol_register_offld_flush_cb,
5585 .deregister_rx_offld_flush_cb = ol_deregister_offld_flush_cb
5586};
5587#endif
5588
Leo Chang98726762016-10-28 11:07:18 -07005589static struct cdp_bus_ops ol_ops_bus = {
5590 .bus_suspend = ol_txrx_bus_suspend,
5591 .bus_resume = ol_txrx_bus_resume
5592};
5593
Nirav Shah575282c2018-07-08 22:48:00 +05305594#ifdef WLAN_FEATURE_DSRC
Leo Chang98726762016-10-28 11:07:18 -07005595static struct cdp_ocb_ops ol_ops_ocb = {
5596 .set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
5597 .get_ocb_chan_info = ol_txrx_get_ocb_chan_info
5598};
Nirav Shah575282c2018-07-08 22:48:00 +05305599#endif
Leo Chang98726762016-10-28 11:07:18 -07005600
5601static struct cdp_throttle_ops ol_ops_throttle = {
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005602#ifdef QCA_SUPPORT_TX_THROTTLE
Leo Chang98726762016-10-28 11:07:18 -07005603 .throttle_init_period = ol_tx_throttle_init_period,
5604 .throttle_set_level = ol_tx_throttle_set_level
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005605#endif /* QCA_SUPPORT_TX_THROTTLE */
Leo Chang98726762016-10-28 11:07:18 -07005606};
5607
5608static struct cdp_mob_stats_ops ol_ops_mob_stats = {
Leo Chang98726762016-10-28 11:07:18 -07005609 .clear_stats = ol_txrx_clear_stats,
5610 .stats = ol_txrx_stats
5611};
5612
5613static struct cdp_cfg_ops ol_ops_cfg = {
5614 .set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
5615 .set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
5616 .cfg_attach = ol_pdev_cfg_attach,
5617 .vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
5618 .is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
5619 .tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
5620 .is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
5621 .set_flow_control_parameters =
5622 ol_txrx_wrapper_set_flow_control_parameters,
5623 .set_flow_steering = ol_set_cfg_flow_steering,
Yu Wang66a250b2017-07-19 11:46:40 +08005624 .set_ptp_rx_opt_enabled = ol_set_cfg_ptp_rx_opt_enabled,
jitiphilebf3a922018-11-05 14:25:00 +05305625 .set_new_htt_msg_format =
5626 ol_txrx_set_new_htt_msg_format,
Alok Kumare1977442018-11-28 17:16:03 +05305627 .set_peer_unmap_conf_support = ol_txrx_set_peer_unmap_conf_support,
5628 .get_peer_unmap_conf_support = ol_txrx_get_peer_unmap_conf_support,
Leo Chang98726762016-10-28 11:07:18 -07005629};
5630
5631static struct cdp_peer_ops ol_ops_peer = {
5632 .register_peer = ol_txrx_wrapper_register_peer,
5633 .clear_peer = ol_txrx_clear_peer,
Mohit Khannab7bec722017-11-10 11:43:44 -08005634 .peer_get_ref_by_addr = ol_txrx_wrapper_peer_get_ref_by_addr,
5635 .peer_release_ref = ol_txrx_wrapper_peer_release_ref,
Leo Chang98726762016-10-28 11:07:18 -07005636 .find_peer_by_addr = ol_txrx_wrapper_find_peer_by_addr,
5637 .find_peer_by_addr_and_vdev = ol_txrx_find_peer_by_addr_and_vdev,
5638 .local_peer_id = ol_txrx_local_peer_id,
5639 .peer_find_by_local_id = ol_txrx_wrapper_peer_find_by_local_id,
5640 .peer_state_update = ol_txrx_wrapper_peer_state_update,
5641 .get_vdevid = ol_txrx_get_vdevid,
5642 .get_vdev_by_sta_id = ol_txrx_get_vdev_by_sta_id,
5643 .register_ocb_peer = ol_txrx_register_ocb_peer,
5644 .peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
5645 .get_peer_state = ol_txrx_get_peer_state,
5646 .get_vdev_for_peer = ol_txrx_get_vdev_for_peer,
5647 .update_ibss_add_peer_num_of_vdev =
5648 ol_txrx_update_ibss_add_peer_num_of_vdev,
5649 .remove_peers_for_vdev = ol_txrx_remove_peers_for_vdev,
5650 .remove_peers_for_vdev_no_lock = ol_txrx_remove_peers_for_vdev_no_lock,
Yu Wang053d3e72017-02-08 18:48:24 +08005651#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
Leo Chang98726762016-10-28 11:07:18 -07005652 .copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
5653 .add_last_real_peer = ol_txrx_add_last_real_peer,
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005654 .is_vdev_restore_last_peer = is_vdev_restore_last_peer,
5655 .update_last_real_peer = ol_txrx_update_last_real_peer,
5656#endif /* CONFIG_HL_SUPPORT */
Leo Chang98726762016-10-28 11:07:18 -07005657 .peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
5658};
5659
5660static struct cdp_tx_delay_ops ol_ops_delay = {
5661#ifdef QCA_COMPUTE_TX_DELAY
5662 .tx_delay = ol_tx_delay,
5663 .tx_delay_hist = ol_tx_delay_hist,
5664 .tx_packet_count = ol_tx_packet_count,
5665 .tx_set_compute_interval = ol_tx_set_compute_interval
5666#endif /* QCA_COMPUTE_TX_DELAY */
5667};
5668
5669static struct cdp_pmf_ops ol_ops_pmf = {
5670 .get_pn_info = ol_txrx_get_pn_info
5671};
5672
Leo Chang98726762016-10-28 11:07:18 -07005673static struct cdp_ctrl_ops ol_ops_ctrl = {
Hanumanth Reddy Pothula855f7ef2018-02-13 18:32:05 +05305674 .txrx_get_pldev = ol_get_pldev,
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08005675 .txrx_wdi_event_sub = wdi_event_sub,
5676 .txrx_wdi_event_unsub = wdi_event_unsub,
Leo Chang98726762016-10-28 11:07:18 -07005677};
5678
Hanumanth Reddy Pothula855f7ef2018-02-13 18:32:05 +05305679/* WINplatform specific structures */
Leo Chang98726762016-10-28 11:07:18 -07005680static struct cdp_me_ops ol_ops_me = {
5681 /* EMPTY FOR MCL */
5682};
5683
5684static struct cdp_mon_ops ol_ops_mon = {
5685 /* EMPTY FOR MCL */
5686};
5687
5688static struct cdp_host_stats_ops ol_ops_host_stats = {
5689 /* EMPTY FOR MCL */
5690};
5691
5692static struct cdp_wds_ops ol_ops_wds = {
5693 /* EMPTY FOR MCL */
5694};
5695
5696static struct cdp_raw_ops ol_ops_raw = {
5697 /* EMPTY FOR MCL */
5698};
5699
5700static struct cdp_ops ol_txrx_ops = {
5701 .cmn_drv_ops = &ol_ops_cmn,
5702 .ctrl_ops = &ol_ops_ctrl,
5703 .me_ops = &ol_ops_me,
5704 .mon_ops = &ol_ops_mon,
5705 .host_stats_ops = &ol_ops_host_stats,
5706 .wds_ops = &ol_ops_wds,
5707 .raw_ops = &ol_ops_raw,
5708 .misc_ops = &ol_ops_misc,
5709 .cfg_ops = &ol_ops_cfg,
5710 .flowctl_ops = &ol_ops_flowctl,
5711 .l_flowctl_ops = &ol_ops_l_flowctl,
Yun Parkb4f591d2017-03-29 15:51:01 -07005712#ifdef IPA_OFFLOAD
Leo Chang98726762016-10-28 11:07:18 -07005713 .ipa_ops = &ol_ops_ipa,
Yun Parkb4f591d2017-03-29 15:51:01 -07005714#endif
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005715#ifdef RECEIVE_OFFLOAD
5716 .rx_offld_ops = &ol_rx_offld_ops,
5717#endif
Leo Chang98726762016-10-28 11:07:18 -07005718 .bus_ops = &ol_ops_bus,
Nirav Shah575282c2018-07-08 22:48:00 +05305719#ifdef WLAN_FEATURE_DSRC
Leo Chang98726762016-10-28 11:07:18 -07005720 .ocb_ops = &ol_ops_ocb,
Nirav Shah575282c2018-07-08 22:48:00 +05305721#endif
Leo Chang98726762016-10-28 11:07:18 -07005722 .peer_ops = &ol_ops_peer,
5723 .throttle_ops = &ol_ops_throttle,
5724 .mob_stats_ops = &ol_ops_mob_stats,
5725 .delay_ops = &ol_ops_delay,
5726 .pmf_ops = &ol_ops_pmf
5727};
5728
Jeff Johnson02c37b42017-01-10 14:49:24 -08005729/*
5730 * Local prototype added to temporarily address warning caused by
5731 * -Wmissing-prototypes. A more correct solution, namely to expose
5732 * a prototype in an appropriate header file, will come later.
5733 */
5734struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
5735 struct ol_if_ops *dp_ol_if_ops);
5736struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
5737 struct ol_if_ops *dp_ol_if_ops)
Leo Chang98726762016-10-28 11:07:18 -07005738{
5739 struct cdp_soc_t *soc = qdf_mem_malloc(sizeof(struct cdp_soc_t));
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07005740
Nirav Shah7c8c1712018-09-10 16:01:31 +05305741 if (!soc)
Leo Chang98726762016-10-28 11:07:18 -07005742 return NULL;
Leo Chang98726762016-10-28 11:07:18 -07005743
5744 soc->ops = &ol_txrx_ops;
5745 return soc;
5746}
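
/*
 * Illustrative sketch, with hypothetical platform glue: the attach sequence
 * hands the upper layers a cdp_soc_t whose ops table is the ol_txrx_ops
 * structure above, so every later dispatch goes through soc->ops. The
 * scn_handle and dp_ol_if_ops arguments are whatever the platform normally
 * supplies; error handling is abbreviated.
 *
 *    struct cdp_soc_t *soc;
 *
 *    soc = ol_txrx_soc_attach(scn_handle, dp_ol_if_ops);
 *    if (!soc)
 *        return QDF_STATUS_E_NOMEM;
 *
 *    soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
 *    ...
 *    soc->ops->cmn_drv_ops->txrx_soc_detach(soc);
 */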
jitiphilebf3a922018-11-05 14:25:00 +05305747
5748bool ol_txrx_get_new_htt_msg_format(struct ol_txrx_pdev_t *pdev)
5749{
5750 if (!pdev) {
5751 qdf_print("%s: pdev is NULL\n", __func__);
5752 return false;
5753 }
5754 return pdev->new_htt_msg_format;
5755}
5756
5757void ol_txrx_set_new_htt_msg_format(uint8_t val)
5758{
5759 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5760
5761 if (!pdev) {
5762 qdf_print("%s: pdev is NULL\n", __func__);
5763 return;
5764 }
5765 pdev->new_htt_msg_format = val;
5766}
5767
Alok Kumare1977442018-11-28 17:16:03 +05305768bool ol_txrx_get_peer_unmap_conf_support(void)
5769{
5770 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5771
5772 if (!pdev) {
5773 qdf_print("%s: pdev is NULL\n", __func__);
5774 return false;
5775 }
5776 return pdev->enable_peer_unmap_conf_support;
5777}
5778
5779void ol_txrx_set_peer_unmap_conf_support(bool val)
5780{
5781 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5782
5783 if (!pdev) {
5784 qdf_print("%s: pdev is NULL\n", __func__);
5785 return;
5786 }
5787 pdev->enable_peer_unmap_conf_support = val;
5788}