/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>              /* uint32_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc,free */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <qdf_debugfs.h>

/* header files for utilities */
#include <cds_queue.h>          /* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_if_athvar.h>

/* header files for HTT API */
#include <ol_htt_api.h>
#include <ol_htt_tx_api.h>

/* header files for our own APIs */
#include <ol_txrx_api.h>
#include <ol_txrx_dbg.h>
#include <cdp_txrx_ocb.h>
#include <ol_txrx_ctrl_api.h>
#include <cdp_txrx_stats.h>
#include <ol_txrx_osif_api.h>
/* header files for our internal definitions */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT, etc. */
#include <wdi_event.h>          /* WDI events */
#include <ol_tx.h>              /* ol_tx_ll */
#include <ol_rx.h>              /* ol_rx_deliver */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
#include <ol_rx_pn.h>           /* ol_rx_pn_check, etc. */
#include <ol_rx_fwd.h>          /* ol_rx_fwd_check, etc. */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
#include <ol_rx_reorder.h>
#include <ol_tx_send.h>         /* ol_tx_discard_target_frms */
#include <ol_tx_desc.h>         /* ol_tx_desc_frame_free */
#include <ol_tx_queue.h>
#include <ol_tx_sched.h>        /* ol_tx_sched_attach, etc. */
#include <ol_txrx.h>
#include <ol_txrx_types.h>
#include <cdp_txrx_flow_ctrl_legacy.h>
#include <cdp_txrx_bus.h>
#include <cdp_txrx_ipa.h>
#include <cdp_txrx_pmf.h>
#include "wma.h"
#include "hif.h"
#include "hif_main.h"
#include <cdp_txrx_peer_ops.h>
#ifndef REMOVE_PKT_LOG
#include "pktlog_ac.h"
#endif
#include <wlan_policy_mgr_api.h>
#include "epping_main.h"
#include <a_types.h>
#include <cdp_txrx_handle.h>
#include "wlan_qct_sys.h"

#include <htt_internal.h>
#include <ol_txrx_ipa.h>
#include "wlan_roam_debug.h"

#define DPT_DEBUGFS_PERMS	(QDF_FILE_USR_READ |	\
				 QDF_FILE_USR_WRITE |	\
				 QDF_FILE_GRP_READ |	\
				 QDF_FILE_OTH_READ)
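
/*
 * Note: with the conventional mapping of the QDF_FILE_* flags onto POSIX
 * permission bits (an assumption; see qdf_debugfs.h for the actual values),
 * this corresponds to mode 0644: owner read/write, group and others
 * read-only.
 */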

#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *pdev,
			      uint8_t local_peer_id);
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
				 uint8_t local_peer_id,
				 enum peer_debug_id_type dbg_id);
#endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */
QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
				     uint8_t *peer_mac,
				     enum ol_txrx_peer_state state);
static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *vdev,
					bool val);
int ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
extern void
ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
		      struct ol_tx_wmm_param_t wmm_param);

extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
				uint64_t **last_pn, uint32_t **rmf_pn_replays);

/* thresh for peer's cached buf queue beyond which the elements are dropped */
#define OL_TXRX_CACHED_BUFQ_THRESH 128

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
 * @vdev: the data virtual device
 * @bss_addr: bss address
 *
 * Return: None
 */
static void
ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
	if (bss_addr && vdev->last_real_peer &&
	    !qdf_mem_cmp((u8 *)bss_addr,
			 vdev->last_real_peer->mac_addr.raw,
			 IEEE80211_ADDR_LEN))
		qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
			     vdev->last_real_peer->mac_addr.raw,
			     OL_TXRX_MAC_ADDR_LEN);
	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
}

/**
 * ol_txrx_add_last_real_peer() - add last peer
 * @pdev: the data physical device
 * @vdev: virtual device
 * @peer_id: peer id
 *
 * Return: None
 */
static void
ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
			   struct cdp_vdev *pvdev, uint8_t *peer_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	ol_txrx_peer_handle peer;

	peer = ol_txrx_find_peer_by_addr(
		(struct cdp_pdev *)pdev,
		vdev->hl_tdls_ap_mac_addr.raw,
		peer_id);

	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
	if (!vdev->last_real_peer && peer &&
	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
		vdev->last_real_peer = peer;
	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
}

/**
 * is_vdev_restore_last_peer() - check for vdev last peer
 * @peer: peer object
 *
 * Return: true if the given peer is the vdev's last real peer
 */
static bool
is_vdev_restore_last_peer(void *ppeer)
{
	struct ol_txrx_peer_t *peer = ppeer;
	struct ol_txrx_vdev_t *vdev;

	vdev = peer->vdev;
	return vdev->last_real_peer && (vdev->last_real_peer == peer);
}

/**
 * ol_txrx_update_last_real_peer() - restore the vdev's last real peer
 * @pdev: the data physical device
 * @peer: peer device
 * @peer_id: peer id
 * @restore_last_peer: restore last peer flag
 *
 * Return: None
 */
static void
ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *ppeer,
			      uint8_t *peer_id, bool restore_last_peer)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_peer_t *peer = ppeer;
	struct ol_txrx_vdev_t *vdev;

	if (!restore_last_peer)
		return;

	vdev = peer->vdev;
	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
			vdev->hl_tdls_ap_mac_addr.raw, peer_id);

	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
	if (!vdev->last_real_peer && peer &&
	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
		vdev->last_real_peer = peer;
	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
}
#endif

/**
 * ol_tx_mark_first_wakeup_packet() - set flag to indicate that
 * fw is compatible for marking first packet after wow wakeup
 * @value: 1 for enabled/ 0 for disabled
 *
 * Return: None
 */
static void ol_tx_mark_first_wakeup_packet(uint8_t value)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err(
			"%s: pdev is NULL\n", __func__);
		return;
	}

	htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
}

u_int16_t
ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
{
	u_int16_t desc_pool_size;
	u_int16_t steady_state_tx_lifetime_ms;
	u_int16_t safety_factor;

	/*
	 * Steady-state tx latency:
	 *     roughly 1-2 ms flight time
	 *   + roughly 1-2 ms prep time,
	 *   + roughly 1-2 ms target->host notification time.
	 * = roughly 6 ms total
	 * Thus, steady state number of frames =
	 * steady state max throughput / frame size * tx latency, e.g.
	 * 1 Gbps / 1500 bytes * 6 ms = 500
	 *
	 */
	steady_state_tx_lifetime_ms = 6;

	safety_factor = 8;

	desc_pool_size =
		ol_cfg_max_thruput_mbps(ctrl_pdev) *
		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
		(8 * OL_TX_AVG_FRM_BYTES) *
		steady_state_tx_lifetime_ms *
		safety_factor;

	/* minimum */
	if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;

	/* maximum */
	if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;

	return desc_pool_size;
}
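
/*
 * Worked example for the sizing above (illustrative only; assumes
 * ol_cfg_max_thruput_mbps() returns 1000 and OL_TX_AVG_FRM_BYTES is 1500):
 *   desc_pool_size = 1000 * 1000 / (8 * 1500) * 6 * 8
 *                  = 83 * 6 * 8 = 3984 descriptors (integer arithmetic),
 * which is then clamped to the
 * [OL_TX_DESC_POOL_SIZE_MIN_HL, OL_TX_DESC_POOL_SIZE_MAX_HL] range.
 */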

/*=== function definitions ===*/

/**
 * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate that mgmt over
 * wmi is enabled or not.
 * @value: 1 for enabled/ 0 for disabled
 *
 * Return: None
 */
void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->is_mgmt_over_wmi_enabled = value;
}

/**
 * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
 *
 * Return: is_mgmt_over_wmi_enabled
 */
uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return 0;
	}
	return pdev->is_mgmt_over_wmi_enabled;
}


#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
static void *
ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
	struct cdp_vdev *pvdev, uint8_t *peer_addr, uint8_t *peer_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	return peer;
}

static QDF_STATUS ol_txrx_get_vdevid(void *ppeer, uint8_t *vdev_id)
{
	struct ol_txrx_peer_t *peer = ppeer;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "peer argument is null!!");
		return QDF_STATUS_E_FAILURE;
	}

	*vdev_id = peer->vdev->vdev_id;
	return QDF_STATUS_SUCCESS;
}

static struct cdp_vdev *ol_txrx_get_vdev_by_sta_id(struct cdp_pdev *ppdev,
						   uint8_t sta_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_peer_t *peer = NULL;
	ol_txrx_vdev_handle vdev;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PDEV not found for sta_id [%d]", sta_id);
		return NULL;
	}

	peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev, sta_id,
						PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PEER [%d] not found", sta_id);
		return NULL;
	}

	vdev = peer->vdev;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);

	return (struct cdp_vdev *)vdev;
}

/**
 * ol_txrx_find_peer_by_addr() - find peer via peer mac addr and peer_id
 * @ppdev: pointer of type cdp_pdev
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds a peer with the given mac address and returns its
 * peer_id.
 * Note that this function does not increment the peer->ref_cnt.
 * This means that the peer may be deleted in some other parallel context
 * after it has been found.
 *
 * Return: peer handle if peer is found, NULL if peer is not found.
 */
void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
				uint8_t *peer_addr,
				uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
						    PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	return peer;
}

/**
 * ol_txrx_peer_get_ref_by_addr() - get peer ref via peer mac addr and peer_id
 * @pdev: pointer of type ol_txrx_pdev_handle
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds the peer with the given mac address and returns its
 * peer_id.
 * Note that this function increments the peer->ref_cnt.
 * This makes sure that the peer will remain valid. This also means the caller
 * needs to call the corresponding API - ol_txrx_peer_release_ref - to delete
 * the peer reference.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr, peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
						 u8 *peer_addr,
						 u8 *peer_id,
						 enum peer_debug_id_type dbg_id)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
						    dbg_id);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	return peer;
}

static uint16_t ol_txrx_local_peer_id(void *ppeer)
{
	ol_txrx_peer_handle peer = ppeer;

	return peer->local_id;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @return handle to the txrx peer object
 */
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *ppdev,
			      uint8_t local_peer_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_peer_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	return peer;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @dbg_id - debug_id to track caller
 * @return handle to the txrx peer object
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * Note that this function increments the peer->ref_cnt.
 * This makes sure that the peer will remain valid. This also means the caller
 * needs to call the corresponding API - ol_txrx_peer_release_ref - to delete
 * the peer reference.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_local_id(pdev, local_peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
				 uint8_t local_peer_id,
				 enum peer_debug_id_type dbg_id)
{
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_peer_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	if (peer && peer->valid)
		ol_txrx_peer_get_ref(peer, dbg_id);
	else
		peer = NULL;
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);

	return peer;
}

static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
}
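
/*
 * Illustrative freelist layout right after ol_txrx_local_peer_id_pool_init()
 * (a sketch, pretending OL_TXRX_NUM_LOCAL_PEER_IDS were 4):
 *
 *   freelist = 0
 *   pool[]   = { 1, 2, 3, 4, 4 }   // pool[i] = next free ID; pool[N] == N
 *                                  // marks the end of the list
 *   map[]    = { NULL, NULL, NULL, NULL }
 *
 * ol_txrx_local_peer_id_alloc() below pops the head ID and advances freelist
 * to pool[head]; ol_txrx_local_peer_id_free() pushes the ID back onto the
 * head.  When freelist points at the self-linked entry, the pool is exhausted
 * and OL_TXRX_INVALID_LOCAL_PEER_ID is handed out instead.
 */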

static void
ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}
	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
}

#else
#define ol_txrx_local_peer_id_pool_init(pdev)   /* no-op */
#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
#define ol_txrx_local_peer_id_free(pdev, peer)  /* no-op */
#define ol_txrx_local_peer_id_cleanup(pdev)     /* no-op */
#endif

#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

/**
 * ol_txrx_update_group_credit() - update group credit for tx queue
 * @group: group for which credit needs to be updated
 * @credit: credits
 * @absolute: TXQ group absolute
 *
 * Return: None
 */
void ol_txrx_update_group_credit(
		struct ol_tx_queue_group_t *group,
		int32_t credit,
		u_int8_t absolute)
{
	if (absolute)
		qdf_atomic_set(&group->credit, credit);
	else
		qdf_atomic_add(credit, &group->credit);
}

/**
 * ol_txrx_update_tx_queue_groups() - update vdev tx queue group if
 *				      the vdev id mask and ac mask do not match
 * @pdev: the data physical device
 * @group_id: TXQ group id
 * @credit: TXQ group credit count
 * @absolute: TXQ group absolute
 * @vdev_id_mask: TXQ vdev group id mask
 * @ac_mask: TXQ access category mask
 *
 * Return: None
 */
void ol_txrx_update_tx_queue_groups(
		ol_txrx_pdev_handle pdev,
		u_int8_t group_id,
		int32_t credit,
		u_int8_t absolute,
		u_int32_t vdev_id_mask,
		u_int32_t ac_mask
		)
{
	struct ol_tx_queue_group_t *group;
	u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
	u_int32_t membership;
	struct ol_txrx_vdev_t *vdev;

	if (group_id >= OL_TX_MAX_TXQ_GROUPS) {
		ol_txrx_warn("%s: invalid group_id=%u, ignore update.\n",
			     __func__,
			     group_id);
		return;
	}

	group = &pdev->txq_grps[group_id];

	membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	/*
	 * if the membership (vdev id mask and ac mask)
	 * matches then there is no need to update tx queue groups.
	 */
	if (group->membership == membership)
		/* Update Credit Only */
		goto credit_update;


	/*
	 * membership (vdev id mask and ac mask) is not matching
	 * TODO: ignoring ac mask for now
	 */
	group_vdev_id_mask =
		OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		group_vdev_bit_mask =
			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
					group_vdev_id_mask, vdev->vdev_id);
		vdev_bit_mask =
			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
					vdev_id_mask, vdev->vdev_id);

		if (group_vdev_bit_mask != vdev_bit_mask) {
			/*
			 * Change in vdev tx queue group
			 */
			if (!vdev_bit_mask) {
				/* Set Group Pointer (vdev and peer) to NULL */
				ol_tx_set_vdev_group_ptr(
						pdev, vdev->vdev_id, NULL);
			} else {
				/* Set Group Pointer (vdev and peer) */
				ol_tx_set_vdev_group_ptr(
						pdev, vdev->vdev_id, group);
			}
		}
	}
	/* Update membership */
	group->membership = membership;
credit_update:
	/* Update Credit */
	ol_txrx_update_group_credit(group, credit, absolute);
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
}
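
/*
 * Illustrative flow (a sketch; the exact bit layout of the membership macros
 * is defined elsewhere and not assumed here): a credit update for group 0
 * carrying vdev_id_mask = 0x3 and an ac_mask covering all access categories
 * would attach vdevs 0 and 1 to pdev->txq_grps[0] via
 * ol_tx_set_vdev_group_ptr(), detach any vdev whose bit dropped out of the
 * mask, and finally apply the credit delta (or absolute value) with
 * ol_txrx_update_group_credit().
 */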
#endif

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_set_desc_global_pool_size() - set global pool size
 * @num_msdu_desc: total number of descriptors
 *
 * Return: none
 */
static void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->num_msdu_desc = num_msdu_desc;
	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
		pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
	ol_txrx_info_high("Global pool size: %d\n",
			  pdev->num_msdu_desc);
}

/**
 * ol_tx_get_desc_global_pool_size() - get global pool size
 * @pdev: pdev handle
 *
 * Return: global pool size
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return pdev->num_msdu_desc;
}

/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
static inline
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
	struct ol_tx_flow_pool_t *pool = NULL;
	uint32_t free_desc;

	free_desc = pdev->tx_desc.num_free;
	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		free_desc += pool->avail_desc;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	}
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return free_desc;
}

#else
/**
 * ol_tx_get_desc_global_pool_size() - get global pool size
 * @pdev: pdev handle
 *
 * Return: global pool size
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
}

/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
static inline
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
	return pdev->tx_desc.num_free;
}

#endif

#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)

/**
 * ol_txrx_rsrc_threshold_lo() - set threshold low - when to start tx desc
 *				 margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold low
 */
static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;

	/*
	 * 5% margin of unallocated desc is too much for per
	 * vdev mechanism.
	 * Define the value separately.
	 */
	threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;

	return threshold_low;
}

/**
 * ol_txrx_rsrc_threshold_hi() - set threshold high - where to stop
 *				 during tx desc margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold high
 */
static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/* when freeing up descriptors,
	 * keep going until there's a 7.5% margin
	 */
	threshold_high = ((15 * desc_pool_size) / 100) / 2;

	return threshold_high;
}
#else

static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;
	/* always maintain a 5% margin of unallocated descriptors */
	threshold_low = (5 * desc_pool_size) / 100;

	return threshold_low;
}

static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/* when freeing up descriptors, keep going until
	 * there's a 15% margin
	 */
	threshold_high = (15 * desc_pool_size) / 100;

	return threshold_high;
}
#endif
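
/*
 * Worked example (illustrative only): with desc_pool_size = 1024, the default
 * (non per-vdev) thresholds above come out to
 *   threshold_lo = (5 * 1024) / 100  = 51 descriptors
 *   threshold_hi = (15 * 1024) / 100 = 153 descriptors
 * while the CONFIG_PER_VDEV_TX_DESC_POOL variant pins the low mark at
 * TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED and halves the high mark to
 * ((15 * 1024) / 100) / 2 = 76 descriptors.
 */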

#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
 * @pdev: the physical device object
 *
 * Return: None
 */
static void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->txq_log_spinlock);
	pdev->txq_log.size = OL_TXQ_LOG_SIZE;
	pdev->txq_log.oldest_record_offset = 0;
	pdev->txq_log.offset = 0;
	pdev->txq_log.allow_wrap = 1;
	pdev->txq_log.wrapped = 0;
}

/**
 * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->txq_log_spinlock);
}

#else

static inline void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
}

static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
}


#endif

#if defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->grp_stat_spinlock);
	pdev->grp_stats.last_valid_index = -1;
	pdev->grp_stats.wrap_around = 0;
}

/**
 * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
}
#else

static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
}

static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
}
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
 * @vdev: the virtual device object
 * @flag: flag
 *
 * Return: None
 */
void
ol_txrx_hl_tdls_flag_reset(struct cdp_vdev *pvdev, bool flag)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	vdev->hlTdlsFlag = flag;
}
#endif

#if defined(CONFIG_HL_SUPPORT)

/**
 * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
	u_int8_t i;

	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		TAILQ_INIT(&vdev->txqs[i].head);
		vdev->txqs[i].paused_count.total = 0;
		vdev->txqs[i].frms = 0;
		vdev->txqs[i].bytes = 0;
		vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
		vdev->txqs[i].flag = ol_tx_queue_empty;
		/* aggregation is not applicable for vdev tx queues */
		vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
		ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
		ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
	}
}

/**
 * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_frms_queue_t *txq;
	int i;

	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		txq = &vdev->txqs[i];
		ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
	}
}

/**
 * ol_txrx_peer_txqs_init() - initialise peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
static void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
		       struct ol_txrx_peer_t *peer)
{
	uint8_t i;
	struct ol_txrx_vdev_t *vdev = peer->vdev;

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		TAILQ_INIT(&peer->txqs[i].head);
		peer->txqs[i].paused_count.total = 0;
		peer->txqs[i].frms = 0;
		peer->txqs[i].bytes = 0;
		peer->txqs[i].ext_tid = i;
		peer->txqs[i].flag = ol_tx_queue_empty;
		peer->txqs[i].aggr_state = ol_tx_aggr_untried;
		ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
		ol_txrx_set_txq_peer(&peer->txqs[i], peer);
	}
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);

	/* aggregation is not applicable for mgmt and non-QoS tx queues */
	for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
		peer->txqs[i].aggr_state = ol_tx_aggr_disabled;

	ol_txrx_peer_pause(peer);
}

/**
 * ol_txrx_peer_tx_queue_free() - free peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
static void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	struct ol_tx_frms_queue_t *txq;
	uint8_t i;

	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		txq = &peer->txqs[i];
		ol_tx_queue_free(pdev, txq, i, true);
	}
}
#else

static inline void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
}

static inline void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
}

static inline void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
		       struct ol_txrx_peer_t *peer)
{
}

static inline void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
}
#endif

#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
{
	qdf_spinlock_create(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
{
	qdf_spinlock_destroy(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
	int msdu_idx;
	int seg_idx;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "TSO Statistics:");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "TSO pkts %lld, bytes %lld\n",
		  pdev->stats.pub.tx.tso.tso_pkts.pkts,
		  pdev->stats.pub.tx.tso.tso_pkts.bytes);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "TSO Histogram for numbers of segments:\n"
		  "Single segment %d\n"
		  "  2-5 segments %d\n"
		  " 6-10 segments %d\n"
		  "11-15 segments %d\n"
		  "16-20 segments %d\n"
		  "  20+ segments %d\n",
		  pdev->stats.pub.tx.tso.tso_hist.pkts_1,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "TSO History Buffer: Total size %d, current_index %d",
		  NUM_MAX_TSO_MSDUS,
		  TXRX_STATS_TSO_MSDU_IDX(pdev));

	for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
		if (TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx) == 0)
			continue;
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
			  "jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
			  msdu_idx,
			  TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
			  TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
			  TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
			  TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));

		for (seg_idx = 0;
		     ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev,
		       msdu_idx)) && (seg_idx < NUM_MAX_TSO_SEGS));
		     seg_idx++) {
			struct qdf_tso_seg_t tso_seg =
				TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "seg idx: %d", seg_idx);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "tso_enable: %d",
				  tso_seg.tso_flags.tso_enable);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
				  tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
				  tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
				  tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
				  tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
				  tso_seg.tso_flags.ns);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "tcp_seq_num: 0x%x ip_id: %d",
				  tso_seg.tso_flags.tcp_seq_num,
				  tso_seg.tso_flags.ip_id);
		}
	}
}

static void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
{
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_pkts,
		     sizeof(struct ol_txrx_stats_elem));
#if defined(FEATURE_TSO)
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_info,
		     sizeof(struct ol_txrx_stats_tso_info));
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_hist,
		     sizeof(struct ol_txrx_tso_histogram));
#endif
}

#else

static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "TSO is not supported\n");
}

static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
{
	/*
	 * keeping the body empty and not adding an error print, as a print
	 * would show up every time during driver load if TSO is not enabled.
	 */
}

static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
{
	/*
	 * keeping the body empty and not adding an error print, as a print
	 * would show up every time during driver unload if TSO is not enabled.
	 */
}

static void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
{
	/*
	 * keeping the body empty and not adding an error print, as a print
	 * would show up every time during driver unload if TSO is not enabled.
	 */
}
#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */

#if defined(CONFIG_DP_TRACE) && defined(WLAN_DEBUGFS)
/**
 * ol_txrx_read_dpt_buff_debugfs() - read dp trace buffer
 * @file: file to read
 * @arg: pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_read_dpt_buff_debugfs(qdf_debugfs_file_t file,
						void *arg)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)arg;
	uint32_t i = 0;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID)
		return QDF_STATUS_E_INVAL;
	else if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE) {
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
		return QDF_STATUS_SUCCESS;
	}

	i = qdf_dpt_get_curr_pos_debugfs(file, pdev->state);
	status = qdf_dpt_dump_stats_debugfs(file, i);
	if (status == QDF_STATUS_E_FAILURE)
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS;
	else if (status == QDF_STATUS_SUCCESS)
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE;

	return status;
}
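
/*
 * Reader state machine for the debugfs file above (sketch): INIT ->
 * IN_PROGRESS while qdf_dpt_dump_stats_debugfs() keeps returning
 * QDF_STATUS_E_FAILURE (presumably because more records remain than fit in
 * one read), then COMPLETE once it returns success; the following read
 * resets the state back to INIT.  STATE_INVALID means debugfs setup failed
 * and reads are rejected with QDF_STATUS_E_INVAL.
 */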
1249
1250/**
1251 * ol_txrx_write_dpt_buff_debugfs() - set dp trace parameters
1252 * @priv: pdev object
1253 * @buf: buff to get value for dpt parameters
1254 * @len: buf length
1255 *
1256 * Return: QDF_STATUS
1257 */
1258static QDF_STATUS ol_txrx_write_dpt_buff_debugfs(void *priv,
1259 const char *buf,
1260 qdf_size_t len)
1261{
1262 return QDF_STATUS_SUCCESS;
1263}
1264
1265static int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
1266{
1267 pdev->dpt_debugfs_fops.show = ol_txrx_read_dpt_buff_debugfs;
1268 pdev->dpt_debugfs_fops.write = ol_txrx_write_dpt_buff_debugfs;
1269 pdev->dpt_debugfs_fops.priv = pdev;
1270
1271 pdev->dpt_stats_log_dir = qdf_debugfs_create_dir("dpt_stats", NULL);
1272
1273 if (!pdev->dpt_stats_log_dir) {
1274 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1275 "%s: error while creating debugfs dir for %s",
1276 __func__, "dpt_stats");
1277 pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
1278 return -EBUSY;
1279 }
1280
1281 if (!qdf_debugfs_create_file("dump_set_dpt_logs", DPT_DEBUGFS_PERMS,
1282 pdev->dpt_stats_log_dir,
1283 &pdev->dpt_debugfs_fops)) {
1284 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1285 "%s: debug Entry creation failed!",
1286 __func__);
1287 pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
1288 return -EBUSY;
1289 }
1290
1291 pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
1292 return 0;
1293}
1294
Nirav Shahd21a2e32018-04-20 16:34:43 +05301295static void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
1296{
1297 qdf_debugfs_remove_dir_recursive(pdev->dpt_stats_log_dir);
1298}
1299#else
1300static inline int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
1301{
1302 return 0;
1303}
1304
1305static inline void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
1306{
1307}
1308#endif
1309
Rakshith Suresh Patkar44f6a8f2018-04-17 16:17:12 +05301310/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08001311 * ol_txrx_pdev_attach() - allocate txrx pdev
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001312 * @ctrl_pdev: cfg pdev
1313 * @htc_pdev: HTC pdev
1314 * @osdev: os dev
1315 *
1316 * Return: txrx pdev handle
1317 * NULL for failure
1318 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001319static struct cdp_pdev *
1320ol_txrx_pdev_attach(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
Leo Chang98726762016-10-28 11:07:18 -07001321 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001322{
1323 struct ol_txrx_pdev_t *pdev;
hqufd227fe2017-06-26 17:01:14 +08001324 int i, tid;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001325
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301326 pdev = qdf_mem_malloc(sizeof(*pdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001327 if (!pdev)
1328 goto fail0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001329
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301330 /* init LL/HL cfg here */
1331 pdev->cfg.is_high_latency = ol_cfg_is_high_latency(ctrl_pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001332 pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(ctrl_pdev);
1333
1334 /* store provided params */
1335 pdev->ctrl_pdev = ctrl_pdev;
1336 pdev->osdev = osdev;
1337
1338 for (i = 0; i < htt_num_sec_types; i++)
1339 pdev->sec_types[i] = (enum ol_sec_type)i;
1340
1341 TXRX_STATS_INIT(pdev);
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301342 ol_txrx_tso_stats_init(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001343
1344 TAILQ_INIT(&pdev->vdev_list);
1345
tfyu9fcabd72017-09-26 17:46:48 +08001346 TAILQ_INIT(&pdev->req_list);
1347 pdev->req_list_depth = 0;
1348 qdf_spinlock_create(&pdev->req_list_spinlock);
1349
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001350 /* do initial set up of the peer ID -> peer object lookup map */
1351 if (ol_txrx_peer_find_attach(pdev))
1352 goto fail1;
1353
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301354 /* initialize the counter of the target's tx buffer availability */
1355 qdf_atomic_init(&pdev->target_tx_credit);
1356 qdf_atomic_init(&pdev->orig_target_tx_credit);
1357
1358 if (ol_cfg_is_high_latency(ctrl_pdev)) {
1359 qdf_spinlock_create(&pdev->tx_queue_spinlock);
1360 pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
1361 if (pdev->tx_sched.scheduler == NULL)
1362 goto fail2;
1363 }
1364 ol_txrx_pdev_txq_log_init(pdev);
1365 ol_txrx_pdev_grp_stats_init(pdev);
1366
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001367 pdev->htt_pdev =
1368 htt_pdev_alloc(pdev, ctrl_pdev, htc_pdev, osdev);
1369 if (!pdev->htt_pdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301370 goto fail3;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001371
Himanshu Agarwalf65bd4c2016-12-05 17:21:12 +05301372 htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
1373 ol_rx_pkt_dump_call);
hqufd227fe2017-06-26 17:01:14 +08001374
1375 /*
1376 * Init the tid --> category table.
1377 * Regular tids (0-15) map to their AC.
1378 * Extension tids get their own categories.
1379 */
1380 for (tid = 0; tid < OL_TX_NUM_QOS_TIDS; tid++) {
1381 int ac = TXRX_TID_TO_WMM_AC(tid);
1382
1383 pdev->tid_to_ac[tid] = ac;
1384 }
1385 pdev->tid_to_ac[OL_TX_NON_QOS_TID] =
1386 OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;
1387 pdev->tid_to_ac[OL_TX_MGMT_TID] =
1388 OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
1389 pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST] =
1390 OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
1391 pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
1392 OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;
1393
Rakshith Suresh Patkar44f6a8f2018-04-17 16:17:12 +05301394 ol_txrx_debugfs_init(pdev);
1395
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001396 return (struct cdp_pdev *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001397
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301398fail3:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001399 ol_txrx_peer_find_detach(pdev);
1400
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301401fail2:
1402 if (ol_cfg_is_high_latency(ctrl_pdev))
1403 qdf_spinlock_destroy(&pdev->tx_queue_spinlock);
1404
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001405fail1:
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301406 ol_txrx_tso_stats_deinit(pdev);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301407 qdf_mem_free(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001408
1409fail0:
1410 return NULL;
1411}
1412
Komal Seelamc4b28632016-02-03 15:02:18 +05301413#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
1414/**
1415 * htt_pkt_log_init() - API to initialize packet log
1416 * @handle: pdev handle
1417 * @scn: HIF context
1418 *
1419 * Return: void
1420 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001421void htt_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
Komal Seelamc4b28632016-02-03 15:02:18 +05301422{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001423 struct ol_txrx_pdev_t *handle = (struct ol_txrx_pdev_t *)ppdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001424
Komal Seelamc4b28632016-02-03 15:02:18 +05301425 if (handle->pkt_log_init)
1426 return;
1427
Anurag Chouhandf2b2682016-02-29 14:15:27 +05301428 if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
Houston Hoffman371d4a92016-04-14 17:02:37 -07001429 !QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
Venkata Sharath Chandra Manchala1240fc72017-10-26 17:32:29 -07001430 pktlog_sethandle(&handle->pl_dev, scn);
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08001431 pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
Komal Seelamc4b28632016-02-03 15:02:18 +05301432 if (pktlogmod_init(scn))
Anurag Chouhandf2b2682016-02-29 14:15:27 +05301433 qdf_print("%s: pktlogmod_init failed", __func__);
Komal Seelamc4b28632016-02-03 15:02:18 +05301434 else
1435 handle->pkt_log_init = true;
1436 }
1437}
1438
1439/**
1440 * htt_pktlogmod_exit() - API to cleanup pktlog info
1441 * @handle: Pdev handle
1443 *
1444 * Return: void
1445 */
Houston Hoffman8c485042017-02-08 13:40:21 -08001446static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
Komal Seelamc4b28632016-02-03 15:02:18 +05301447{
Houston Hoffman8c485042017-02-08 13:40:21 -08001448 if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
Houston Hoffman371d4a92016-04-14 17:02:37 -07001449 !QDF_IS_EPPING_ENABLED(cds_get_conparam()) &&
Komal Seelamc4b28632016-02-03 15:02:18 +05301450 handle->pkt_log_init) {
Houston Hoffman8c485042017-02-08 13:40:21 -08001451 pktlogmod_exit(handle);
Komal Seelamc4b28632016-02-03 15:02:18 +05301452 handle->pkt_log_init = false;
1453 }
1454}
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001455
Komal Seelamc4b28632016-02-03 15:02:18 +05301456#else
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001457void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *ol_sc) { }
Houston Hoffman8c485042017-02-08 13:40:21 -08001458static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
Komal Seelamc4b28632016-02-03 15:02:18 +05301459#endif
1460
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001461/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08001462 * ol_txrx_pdev_post_attach() - attach txrx pdev
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001463 * @pdev: txrx pdev
1464 *
1465 * Return: 0 for success
1466 */
1467int
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001468ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001469{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001470 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001471 uint16_t i;
1472 uint16_t fail_idx = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001473 int ret = 0;
1474 uint16_t desc_pool_size;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301475 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001476
Leo Chang376398b2015-10-23 14:19:02 -07001477 uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
1478 union ol_tx_desc_list_elem_t *c_element;
1479 unsigned int sig_bit;
1480 uint16_t desc_per_page;
1481
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001482 if (!osc) {
1483 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001484 goto ol_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001485 }
1486
1487 /*
1488 * For LL, limit the number of host's tx descriptors to match
1489 * the number of target FW tx descriptors.
1490 * This simplifies the FW, by ensuring the host will never
1491 * download more tx descriptors than the target has space for.
1492 * The FW will drop/free low-priority tx descriptors when it
1493 * starts to run low, so that in theory the host should never
1494 * run out of tx descriptors.
1495 */
1496
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001497 /*
1498	 * LL - initialize the target credit ourselves.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301499 * HL - wait for a HTT target credit initialization
1500 * during htt_attach.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001501 */
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301502 if (pdev->cfg.is_high_latency) {
1503 desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001504
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301505 qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
1506 qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001507
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301508 pdev->tx_queue.rsrc_threshold_lo =
1509 ol_txrx_rsrc_threshold_lo(desc_pool_size);
1510 pdev->tx_queue.rsrc_threshold_hi =
1511 ol_txrx_rsrc_threshold_hi(desc_pool_size);
1512
1513 for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++)
1514 qdf_atomic_init(&pdev->txq_grps[i].credit);
1515
1516 ol_tx_target_credit_init(pdev, desc_pool_size);
1517 } else {
1518 qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
1519 &pdev->target_tx_credit);
1520 desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
1521 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001522
Nirav Shah76291962016-04-25 10:50:37 +05301523 ol_tx_desc_dup_detect_init(pdev, desc_pool_size);
1524
Nirav Shah5ff1fd02018-03-11 14:55:53 +05301525 ol_tx_setup_fastpath_ce_handles(osc, pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001526
1527 ret = htt_attach(pdev->htt_pdev, desc_pool_size);
1528 if (ret)
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301529 goto htt_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001530
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001531	/* Attach microcontroller data path offload resource */
Yun Parkf01f6e22017-01-18 17:27:02 -08001532 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
1533 ret = htt_ipa_uc_attach(pdev->htt_pdev);
1534 if (ret)
Leo Chang376398b2015-10-23 14:19:02 -07001535 goto uc_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001536 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001537
Leo Chang376398b2015-10-23 14:19:02 -07001538	/* Calculate single element reserved size as a power of 2 */
Anurag Chouhanc5548422016-02-24 18:33:27 +05301539 pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301540 qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
Leo Chang376398b2015-10-23 14:19:02 -07001541 pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
1542 if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
1543 (NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301544 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001545 "Page alloc fail");
Yun Parkf01f6e22017-01-18 17:27:02 -08001546 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001547 goto page_alloc_fail;
1548 }
1549 desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
1550 pdev->tx_desc.offset_filter = desc_per_page - 1;
1551 /* Calculate page divider to find page number */
1552 sig_bit = 0;
1553 while (desc_per_page) {
1554 sig_bit++;
1555 desc_per_page = desc_per_page >> 1;
1556 }
1557 pdev->tx_desc.page_divider = (sig_bit - 1);
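	/*
	 * Worked example (illustrative values only): with 64 descriptors per
	 * page the loop above iterates 7 times, so sig_bit == 7,
	 * page_divider == 6 and offset_filter == 63; a descriptor id then
	 * resolves to page = id >> page_divider and offset = id & offset_filter.
	 */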
Srinivas Girigowdab8ecec22017-03-09 15:02:59 -08001558 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Leo Chang376398b2015-10-23 14:19:02 -07001559 "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
1560 pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
1561 desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
1562 pdev->tx_desc.desc_pages.num_element_per_page);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001563
1564 /*
1565 * Each SW tx desc (used only within the tx datapath SW) has a
1566 * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
1567 * Go ahead and allocate the HTT tx desc and link it with the SW tx
1568 * desc now, to avoid doing it during time-critical transmit.
1569 */
1570 pdev->tx_desc.pool_size = desc_pool_size;
Leo Chang376398b2015-10-23 14:19:02 -07001571 pdev->tx_desc.freelist =
1572 (union ol_tx_desc_list_elem_t *)
1573 (*pdev->tx_desc.desc_pages.cacheable_pages);
1574 c_element = pdev->tx_desc.freelist;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001575 for (i = 0; i < desc_pool_size; i++) {
1576 void *htt_tx_desc;
Leo Chang376398b2015-10-23 14:19:02 -07001577 void *htt_frag_desc = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301578 qdf_dma_addr_t frag_paddr = 0;
1579 qdf_dma_addr_t paddr;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001580
Leo Chang376398b2015-10-23 14:19:02 -07001581 if (i == (desc_pool_size - 1))
1582 c_element->next = NULL;
1583 else
1584 c_element->next = (union ol_tx_desc_list_elem_t *)
1585 ol_tx_desc_find(pdev, i + 1);
1586
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001587 htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001588 if (!htt_tx_desc) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301589 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001590 "%s: failed to alloc HTT tx desc (%d of %d)",
1591 __func__, i, desc_pool_size);
Leo Chang376398b2015-10-23 14:19:02 -07001592 fail_idx = i;
Yun Parkf01f6e22017-01-18 17:27:02 -08001593 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001594 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001595 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001596
Leo Chang376398b2015-10-23 14:19:02 -07001597 c_element->tx_desc.htt_tx_desc = htt_tx_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001598 c_element->tx_desc.htt_tx_desc_paddr = paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001599 ret = htt_tx_frag_alloc(pdev->htt_pdev,
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001600 i, &frag_paddr, &htt_frag_desc);
Leo Chang376398b2015-10-23 14:19:02 -07001601 if (ret) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301602 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001603 "%s: failed to alloc HTT frag dsc (%d/%d)",
1604 __func__, i, desc_pool_size);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001605 /* Is there a leak here, is this handling correct? */
Leo Chang376398b2015-10-23 14:19:02 -07001606 fail_idx = i;
1607 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001608 }
Leo Chang376398b2015-10-23 14:19:02 -07001609 if (!ret && htt_frag_desc) {
Yun Parkeaea8632017-04-09 09:53:45 -07001610 /*
1611 * Initialize the first 6 words (TSO flags)
1612 * of the frag descriptor
1613 */
Leo Chang376398b2015-10-23 14:19:02 -07001614 memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
1615 c_element->tx_desc.htt_frag_desc = htt_frag_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001616 c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001617 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001618#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
Leo Chang376398b2015-10-23 14:19:02 -07001619 c_element->tx_desc.pkt_type = 0xff;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001620#ifdef QCA_COMPUTE_TX_DELAY
Leo Chang376398b2015-10-23 14:19:02 -07001621 c_element->tx_desc.entry_timestamp_ticks =
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001622 0xffffffff;
1623#endif
1624#endif
Leo Chang376398b2015-10-23 14:19:02 -07001625 c_element->tx_desc.id = i;
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301626 qdf_atomic_init(&c_element->tx_desc.ref_cnt);
Leo Chang376398b2015-10-23 14:19:02 -07001627 c_element = c_element->next;
1628 fail_idx = i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001629 }
1630
1631 /* link SW tx descs into a freelist */
1632 pdev->tx_desc.num_free = desc_pool_size;
Poddar, Siddarth14521792017-03-14 21:19:42 +05301633 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001634 "%s first tx_desc:0x%pK Last tx desc:0x%pK\n", __func__,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001635 (uint32_t *) pdev->tx_desc.freelist,
1636 (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001637
1638	/* check what format of frames is expected to be delivered by the OS */
1639 pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
1640 if (pdev->frame_format == wlan_frm_fmt_native_wifi)
1641 pdev->htt_pkt_type = htt_pkt_type_native_wifi;
1642 else if (pdev->frame_format == wlan_frm_fmt_802_3) {
1643 if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
1644 pdev->htt_pkt_type = htt_pkt_type_eth2;
1645 else
1646 pdev->htt_pkt_type = htt_pkt_type_ethernet;
1647 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301648 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001649 "%s Invalid standard frame type: %d",
1650 __func__, pdev->frame_format);
Yun Parkf01f6e22017-01-18 17:27:02 -08001651 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001652 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001653 }
1654
1655 /* setup the global rx defrag waitlist */
1656 TAILQ_INIT(&pdev->rx.defrag.waitlist);
1657
1658 /* configure where defrag timeout and duplicate detection is handled */
1659 pdev->rx.flags.defrag_timeout_check =
1660 pdev->rx.flags.dup_check =
1661 ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);
1662
1663#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
1664	/* Need to revisit this part. Currently, hardcoded to Riva's caps */
1665 pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
1666 pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
1667 /*
1668	 * The Riva HW de-aggregation doesn't have the capability to generate
1669	 * the 802.11 header for non-first subframes of an A-MSDU.
1670 */
1671 pdev->sw_subfrm_hdr_recovery_enable = 1;
1672 /*
1673	 * The Riva HW doesn't have the capability to set the Protected Frame
1674	 * bit in the MAC header for encrypted data frames.
1675 */
1676 pdev->sw_pf_proc_enable = 1;
1677
1678 if (pdev->frame_format == wlan_frm_fmt_802_3) {
Yun Parkeaea8632017-04-09 09:53:45 -07001679 /*
1680		 * SW LLC processing is only needed in the
1681 * 802.3 to 802.11 transform case
1682 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001683 pdev->sw_tx_llc_proc_enable = 1;
1684 pdev->sw_rx_llc_proc_enable = 1;
1685 } else {
1686 pdev->sw_tx_llc_proc_enable = 0;
1687 pdev->sw_rx_llc_proc_enable = 0;
1688 }
1689
1690 switch (pdev->frame_format) {
1691 case wlan_frm_fmt_raw:
1692 pdev->sw_tx_encap =
1693 pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
1694 ? 0 : 1;
1695 pdev->sw_rx_decap =
1696 pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
1697 ? 0 : 1;
1698 break;
1699 case wlan_frm_fmt_native_wifi:
1700 pdev->sw_tx_encap =
1701 pdev->
1702 target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
1703 ? 0 : 1;
1704 pdev->sw_rx_decap =
1705 pdev->
1706 target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
1707 ? 0 : 1;
1708 break;
1709 case wlan_frm_fmt_802_3:
1710 pdev->sw_tx_encap =
1711 pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
1712 ? 0 : 1;
1713 pdev->sw_rx_decap =
1714 pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
1715 ? 0 : 1;
1716 break;
1717 default:
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301718 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001719 "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
1720 pdev->frame_format,
1721 pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
Yun Parkf01f6e22017-01-18 17:27:02 -08001722 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001723 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001724 }
1725#endif
1726
1727 /*
1728 * Determine what rx processing steps are done within the host.
1729 * Possibilities:
1730 * 1. Nothing - rx->tx forwarding and rx PN entirely within target.
1731 * (This is unlikely; even if the target is doing rx->tx forwarding,
1732 * the host should be doing rx->tx forwarding too, as a back up for
1733 * the target's rx->tx forwarding, in case the target runs short on
1734 * memory, and can't store rx->tx frames that are waiting for
1735 * missing prior rx frames to arrive.)
1736 * 2. Just rx -> tx forwarding.
1737 * This is the typical configuration for HL, and a likely
1738 * configuration for LL STA or small APs (e.g. retail APs).
1739 * 3. Both PN check and rx -> tx forwarding.
1740 * This is the typical configuration for large LL APs.
1741 * Host-side PN check without rx->tx forwarding is not a valid
1742 * configuration, since the PN check needs to be done prior to
1743 * the rx->tx forwarding.
1744 */
1745 if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
Yun Parkeaea8632017-04-09 09:53:45 -07001746 /*
1747		 * PN check, rx-tx forwarding and rx reorder are done by
1748 * the target
1749 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001750 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
1751 pdev->rx_opt_proc = ol_rx_in_order_deliver;
1752 else
1753 pdev->rx_opt_proc = ol_rx_fwd_check;
1754 } else {
1755 if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
1756 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
1757 /*
1758 * PN check done on host,
1759 * rx->tx forwarding not done at all.
1760 */
1761 pdev->rx_opt_proc = ol_rx_pn_check_only;
1762 } else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1763 /*
1764 * Both PN check and rx->tx forwarding done
1765 * on host.
1766 */
1767 pdev->rx_opt_proc = ol_rx_pn_check;
1768 } else {
1769#define TRACESTR01 "invalid config: if rx PN check is on the host,"\
1770"rx->tx forwarding check needs to also be on the host"
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301771 QDF_TRACE(QDF_MODULE_ID_TXRX,
1772 QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001773 "%s: %s", __func__, TRACESTR01);
1774#undef TRACESTR01
Yun Parkf01f6e22017-01-18 17:27:02 -08001775 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001776 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001777 }
1778 } else {
1779 /* PN check done on target */
1780 if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
1781 ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1782 /*
1783 * rx->tx forwarding done on host (possibly as
1784 * back-up for target-side primary rx->tx
1785 * forwarding)
1786 */
1787 pdev->rx_opt_proc = ol_rx_fwd_check;
1788 } else {
Yun Parkeaea8632017-04-09 09:53:45 -07001789 /*
1790 * rx->tx forwarding either done in target,
1791 * or not done at all
1792 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001793 pdev->rx_opt_proc = ol_rx_deliver;
1794 }
1795 }
1796 }
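	/*
	 * Summary of the resulting rx_opt_proc choices (restating the logic
	 * above, no new behavior implied):
	 *   full reorder offload, rx->tx fwd disabled -> ol_rx_in_order_deliver
	 *   full reorder offload, rx->tx fwd enabled  -> ol_rx_fwd_check
	 *   host PN check, rx->tx fwd disabled        -> ol_rx_pn_check_only
	 *   host PN check, host rx->tx fwd check      -> ol_rx_pn_check
	 *   target PN check, host rx->tx fwd check    -> ol_rx_fwd_check
	 *   otherwise                                 -> ol_rx_deliver
	 */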
1797
1798 /* initialize mutexes for tx desc alloc and peer lookup */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301799 qdf_spinlock_create(&pdev->tx_mutex);
1800 qdf_spinlock_create(&pdev->peer_ref_mutex);
1801 qdf_spinlock_create(&pdev->rx.mutex);
1802 qdf_spinlock_create(&pdev->last_real_peer_mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001803 qdf_spinlock_create(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001804 OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
1805
Yun Parkf01f6e22017-01-18 17:27:02 -08001806 if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK) {
1807 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001808 goto reorder_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001809 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001810
Yun Parkf01f6e22017-01-18 17:27:02 -08001811 if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK) {
1812 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001813 goto pn_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001814 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001815
1816#ifdef PERE_IP_HDR_ALIGNMENT_WAR
1817 pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
1818#endif
1819
1820 /*
1821 * WDI event attach
1822 */
1823 wdi_event_attach(pdev);
1824
1825 /*
1826 * Initialize rx PN check characteristics for different security types.
1827 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301828 qdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001829
1830 /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
1831 pdev->rx_pn[htt_sec_type_tkip].len =
1832 pdev->rx_pn[htt_sec_type_tkip_nomic].len =
1833 pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
1834 pdev->rx_pn[htt_sec_type_tkip].cmp =
1835 pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
1836 pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
1837
1838 /* WAPI: 128-bit PN */
1839 pdev->rx_pn[htt_sec_type_wapi].len = 128;
1840 pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
1841
1842 OL_RX_REORDER_TIMEOUT_INIT(pdev);
1843
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001844 ol_txrx_dbg("Created pdev %pK\n", pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001845
1846 pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
1847
1848#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
1849#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
1850
1851/* #if 1 -- TODO: clean this up */
1852#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
1853 /* avg = 100% * new + 0% * old */ \
1854 (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
1855/*
Yun Parkeaea8632017-04-09 09:53:45 -07001856 * #else
1857 * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
1858 * //avg = 25% * new + 75% * old
1859 * (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
1860 * #endif
1861 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001862 pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
1863 pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
1864#endif
1865
1866 ol_txrx_local_peer_id_pool_init(pdev);
1867
1868 pdev->cfg.ll_pause_txq_limit =
1869 ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
1870
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301871 /* TX flow control for peer who is in very bad link status */
1872 ol_tx_badpeer_flow_cl_init(pdev);
1873
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001874#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301875 qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301876 qdf_spinlock_create(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001877
1878 /* initialize compute interval with 5 seconds (ESE default) */
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301879 pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001880 {
1881 uint32_t bin_width_1000ticks;
Yun Parkeaea8632017-04-09 09:53:45 -07001882
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001883 bin_width_1000ticks =
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301884 qdf_system_msecs_to_ticks
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001885 (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
1886 * 1000);
1887 /*
1888 * Compute a factor and shift that together are equal to the
1889 * inverse of the bin_width time, so that rather than dividing
1890 * by the bin width time, approximately the same result can be
1891 * obtained much more efficiently by a multiply + shift.
1892 * multiply_factor >> shift = 1 / bin_width_time, so
1893 * multiply_factor = (1 << shift) / bin_width_time.
1894 *
1895 * Pick the shift semi-arbitrarily.
1896 * If we knew statically what the bin_width would be, we could
1897 * choose a shift that minimizes the error.
1898 * Since the bin_width is determined dynamically, simply use a
1899 * shift that is about half of the uint32_t size. This should
1900 * result in a relatively large multiplier value, which
1901 * minimizes error from rounding the multiplier to an integer.
1902 * The rounding error only becomes significant if the tick units
1903 * are on the order of 1 microsecond. In most systems, it is
1904 * expected that the tick units will be relatively low-res,
1905 * on the order of 1 millisecond. In such systems the rounding
1906 * error is negligible.
1907 * It would be more accurate to dynamically try out different
1908 * shifts and choose the one that results in the smallest
1909 * rounding error, but that extra level of fidelity is
1910 * not needed.
1911 */
1912 pdev->tx_delay.hist_internal_bin_width_shift = 16;
1913 pdev->tx_delay.hist_internal_bin_width_mult =
1914 ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
1915 1000 + (bin_width_1000ticks >> 1)) /
1916 bin_width_1000ticks;
1917 }
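	/*
	 * Numeric sketch (assumed values, for illustration only): if one tick
	 * is 1 ms and the internal bin width is 10 ms, then
	 * bin_width_1000ticks = 10000 and
	 * hist_internal_bin_width_mult = ((1 << 16) * 1000 + 5000) / 10000
	 *                              = 6554,
	 * so (delay_ticks * 6554) >> 16 approximates delay_ticks / 10, i.e.
	 * the histogram bin index, without needing a division.
	 */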
1918#endif /* QCA_COMPUTE_TX_DELAY */
1919
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001920 /* Thermal Mitigation */
1921 ol_tx_throttle_init(pdev);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001922
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001923 ol_tso_seg_list_init(pdev, desc_pool_size);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001924
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301925 ol_tso_num_seg_list_init(pdev, desc_pool_size);
1926
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001927 ol_tx_register_flow_control(pdev);
1928
1929 return 0; /* success */
1930
Leo Chang376398b2015-10-23 14:19:02 -07001931pn_trace_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001932 OL_RX_REORDER_TRACE_DETACH(pdev);
1933
Leo Chang376398b2015-10-23 14:19:02 -07001934reorder_trace_attach_fail:
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301935 qdf_spinlock_destroy(&pdev->tx_mutex);
1936 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1937 qdf_spinlock_destroy(&pdev->rx.mutex);
1938 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301939 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001940 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1941
Leo Chang376398b2015-10-23 14:19:02 -07001942control_init_fail:
1943desc_alloc_fail:
1944 for (i = 0; i < fail_idx; i++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001945 htt_tx_desc_free(pdev->htt_pdev,
Leo Chang376398b2015-10-23 14:19:02 -07001946 (ol_tx_desc_find(pdev, i))->htt_tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001947
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301948 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001949 &pdev->tx_desc.desc_pages, 0, true);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001950
Leo Chang376398b2015-10-23 14:19:02 -07001951page_alloc_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001952 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1953 htt_ipa_uc_detach(pdev->htt_pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001954uc_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001955 htt_detach(pdev->htt_pdev);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301956htt_attach_fail:
1957 ol_tx_desc_dup_detect_deinit(pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001958ol_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001959 return ret; /* fail */
1960}
1961
Dhanashri Atre12a08392016-02-17 13:10:34 -08001962/**
1963 * ol_txrx_pdev_attach_target() - send target configuration
1964 *
1965 * @pdev - the physical device being initialized
1966 *
1967 * The majority of the data SW setup are done by the pdev_attach
1968 * functions, but this function completes the data SW setup by
1969 * sending datapath configuration messages to the target.
1970 *
1971 * Return: 0 - success 1 - failure
1972 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001973static A_STATUS ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001974{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001975 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001976
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +05301977 return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0:1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001978}
1979
Dhanashri Atre12a08392016-02-17 13:10:34 -08001980/**
Mohit Khanna54f3a382017-03-13 17:56:32 -07001981 * ol_tx_free_descs_inuse - free tx descriptors which are in use
1982 * @pdev - the physical device for which tx descs need to be freed
1983 *
1984 * Cycle through the list of TX descriptors (for a pdev) which are in use,
1985 * for which TX completion has not been received and free them. Should be
1986 * called only when the interrupts are off and all lower layer RX is stopped.
1987 * Otherwise there may be a race condition with TX completions.
1988 *
1989 * Return: None
1990 */
1991static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
1992{
1993 int i;
1994 void *htt_tx_desc;
1995 struct ol_tx_desc_t *tx_desc;
1996 int num_freed_tx_desc = 0;
1997
1998 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1999 tx_desc = ol_tx_desc_find(pdev, i);
2000 /*
2001 * Confirm that each tx descriptor is "empty", i.e. it has
2002 * no tx frame attached.
2003 * In particular, check that there are no frames that have
2004 * been given to the target to transmit, for which the
2005 * target has never provided a response.
2006 */
2007 if (qdf_atomic_read(&tx_desc->ref_cnt)) {
2008 ol_txrx_dbg("Warning: freeing tx frame (no compltn)");
2009 ol_tx_desc_frame_free_nonstd(pdev,
2010 tx_desc, 1);
2011 num_freed_tx_desc++;
2012 }
2013 htt_tx_desc = tx_desc->htt_tx_desc;
2014 htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
2015 }
2016
2017 if (num_freed_tx_desc)
2018 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2019 "freed %d tx frames for which no resp from target",
2020 num_freed_tx_desc);
2021
2022}
2023
2024/**
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302025 * ol_txrx_pdev_pre_detach() - detach the data SW state
Dhanashri Atre12a08392016-02-17 13:10:34 -08002026 * @pdev - the data physical device object being removed
2027 * @force - delete the pdev (and its vdevs and peers) even if
2028 * there are outstanding references by the target to the vdevs
2029 * and peers within the pdev
2030 *
2031 * This function is used when the WLAN driver is being removed to
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302032 * detach the host data component within the driver.
Dhanashri Atre12a08392016-02-17 13:10:34 -08002033 *
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302034 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08002035 */
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302036static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002037{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002038 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07002039
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002040 /* preconditions */
2041 TXRX_ASSERT2(pdev);
2042
2043 /* check that the pdev has no vdevs allocated */
2044 TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
2045
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002046#ifdef QCA_SUPPORT_TX_THROTTLE
2047 /* Thermal Mitigation */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302048 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
2049 qdf_timer_free(&pdev->tx_throttle.phase_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002050#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302051 qdf_timer_stop(&pdev->tx_throttle.tx_timer);
2052 qdf_timer_free(&pdev->tx_throttle.tx_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002053#endif
2054#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002055
2056 if (force) {
2057 /*
2058 * The assertion above confirms that all vdevs within this pdev
2059 * were detached. However, they may not have actually been
2060 * deleted.
2061 * If the vdev had peers which never received a PEER_UNMAP msg
2062 * from the target, then there are still zombie peer objects,
2063 * and the vdev parents of the zombie peers are also zombies,
2064 * hanging around until their final peer gets deleted.
2065 * Go through the peer hash table and delete any peers left.
2066 * As a side effect, this will complete the deletion of any
2067 * vdevs that are waiting for their peers to finish deletion.
2068 */
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002069 ol_txrx_dbg("Force delete for pdev %pK\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002070 pdev);
2071 ol_txrx_peer_find_hash_erase(pdev);
2072 }
2073
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05302074 /* to get flow pool status before freeing descs */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07002075 ol_tx_dump_flow_pool_info((void *)pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07002076 ol_tx_free_descs_inuse(pdev);
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05302077 ol_tx_deregister_flow_control(pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07002078
2079 /*
2080 * ol_tso_seg_list_deinit should happen after
2081	 * ol_tx_free_descs_inuse as it tries to access the tso seg freelist
2082	 * which is being de-initialized in ol_tso_seg_list_deinit
2083 */
2084 ol_tso_seg_list_deinit(pdev);
2085 ol_tso_num_seg_list_deinit(pdev);
2086
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05302087 /* Stop the communication between HTT and target at first */
2088 htt_detach_target(pdev->htt_pdev);
2089
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302090 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07002091 &pdev->tx_desc.desc_pages, 0, true);
2092 pdev->tx_desc.freelist = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002093
2094	/* Detach microcontroller data path offload resource */
2095 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
2096 htt_ipa_uc_detach(pdev->htt_pdev);
2097
2098 htt_detach(pdev->htt_pdev);
Nirav Shah76291962016-04-25 10:50:37 +05302099 ol_tx_desc_dup_detect_deinit(pdev);
2100
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302101 qdf_spinlock_destroy(&pdev->tx_mutex);
2102 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
2103 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
2104 qdf_spinlock_destroy(&pdev->rx.mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07002105 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002106#ifdef QCA_SUPPORT_TX_THROTTLE
2107 /* Thermal Mitigation */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302108 qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002109#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302110
2111 /* TX flow control for peer who is in very bad link status */
2112 ol_tx_badpeer_flow_cl_deinit(pdev);
2113
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002114 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
2115
2116 OL_RX_REORDER_TRACE_DETACH(pdev);
2117 OL_RX_PN_TRACE_DETACH(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302118
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002119 /*
2120 * WDI event detach
2121 */
2122 wdi_event_detach(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302123
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002124 ol_txrx_local_peer_id_cleanup(pdev);
2125
2126#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302127 qdf_spinlock_destroy(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002128#endif
2129}
2130
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302131/**
2132 * ol_txrx_pdev_detach() - delete the data SW state
2133 * @ppdev - the data physical device object being removed
2134 * @force - delete the pdev (and its vdevs and peers) even if
2135 * there are outstanding references by the target to the vdevs
2136 * and peers within the pdev
2137 *
2138 * This function is used when the WLAN driver is being removed to
2139 * remove the host data component within the driver.
2140 * All virtual devices within the physical device need to be deleted
2141 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
2142 *
2143 * Return: None
2144 */
2145static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
2146{
2147 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Rakesh Pillai33942c42018-05-09 11:45:38 +05302148 struct ol_txrx_stats_req_internal *req, *temp_req;
tfyu9fcabd72017-09-26 17:46:48 +08002149 int i = 0;
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302150
2151	/* checking to ensure txrx pdev structure is not NULL */
2152 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302153 ol_txrx_err(
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302154 "NULL pdev passed to %s\n", __func__);
2155 return;
2156 }
2157
2158 htt_pktlogmod_exit(pdev);
2159
tfyu9fcabd72017-09-26 17:46:48 +08002160 qdf_spin_lock_bh(&pdev->req_list_spinlock);
2161 if (pdev->req_list_depth > 0)
2162 ol_txrx_err(
2163 "Warning: the txrx req list is not empty, depth=%d\n",
2164 pdev->req_list_depth
2165 );
Rakesh Pillai33942c42018-05-09 11:45:38 +05302166 TAILQ_FOREACH_SAFE(req, &pdev->req_list, req_list_elem, temp_req) {
tfyu9fcabd72017-09-26 17:46:48 +08002167 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
2168 pdev->req_list_depth--;
2169 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05302170 "%d: %pK,verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)\n",
tfyu9fcabd72017-09-26 17:46:48 +08002171 i++,
2172 req,
2173 req->base.print.verbose,
2174 req->base.print.concise,
2175 req->base.stats_type_upload_mask,
2176 req->base.stats_type_reset_mask
2177 );
2178 qdf_mem_free(req);
2179 }
2180 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
2181
2182 qdf_spinlock_destroy(&pdev->req_list_spinlock);
2183
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302184 OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
2185
2186 if (pdev->cfg.is_high_latency)
2187 ol_tx_sched_detach(pdev);
2188
2189 htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
2190
2191 htt_pdev_free(pdev->htt_pdev);
2192 ol_txrx_peer_find_detach(pdev);
2193 ol_txrx_tso_stats_deinit(pdev);
2194
2195 ol_txrx_pdev_txq_log_destroy(pdev);
2196 ol_txrx_pdev_grp_stat_destroy(pdev);
Alok Kumarddd457e2018-04-09 13:51:42 +05302197
Rakshith Suresh Patkar44f6a8f2018-04-17 16:17:12 +05302198 ol_txrx_debugfs_exit(pdev);
2199
Alok Kumarddd457e2018-04-09 13:51:42 +05302200 qdf_mem_free(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302201}
2202
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302203#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
2204
2205/**
2206 * ol_txrx_vdev_tx_desc_cnt_init() - initialise tx descriptor count for vdev
2207 * @vdev: the virtual device object
2208 *
2209 * Return: None
2210 */
2211static inline void
2212ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
2213{
2214 qdf_atomic_init(&vdev->tx_desc_count);
2215}
2216#else
2217
2218static inline void
2219ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
2220{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302221}
2222#endif
2223
Dhanashri Atre12a08392016-02-17 13:10:34 -08002224/**
2225 * ol_txrx_vdev_attach - Allocate and initialize the data object
2226 * for a new virtual device.
2227 *
2228 * @data_pdev - the physical device the virtual device belongs to
2229 * @vdev_mac_addr - the MAC address of the virtual device
2230 * @vdev_id - the ID used to identify the virtual device to the target
2231 * @op_mode - whether this virtual device is operating as an AP,
2232 * an IBSS, or a STA
2233 *
2234 * Return: success: handle to new data vdev object, failure: NULL
2235 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002236static struct cdp_vdev *
2237ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002238 uint8_t *vdev_mac_addr,
2239 uint8_t vdev_id, enum wlan_op_mode op_mode)
2240{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002241 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002242 struct ol_txrx_vdev_t *vdev;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002243 QDF_STATUS qdf_status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002244
2245 /* preconditions */
2246 TXRX_ASSERT2(pdev);
2247 TXRX_ASSERT2(vdev_mac_addr);
2248
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302249 vdev = qdf_mem_malloc(sizeof(*vdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002250 if (!vdev)
2251 return NULL; /* failure */
2252
2253 /* store provided params */
2254 vdev->pdev = pdev;
2255 vdev->vdev_id = vdev_id;
2256 vdev->opmode = op_mode;
2257
2258 vdev->delete.pending = 0;
2259 vdev->safemode = 0;
2260 vdev->drop_unenc = 1;
2261 vdev->num_filters = 0;
Himanshu Agarwal5ac2f7b2016-05-06 20:08:10 +05302262 vdev->fwd_tx_packets = 0;
2263 vdev->fwd_rx_packets = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002264
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302265 ol_txrx_vdev_tx_desc_cnt_init(vdev);
2266
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302267 qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002268 OL_TXRX_MAC_ADDR_LEN);
2269
2270 TAILQ_INIT(&vdev->peer_list);
2271 vdev->last_real_peer = NULL;
2272
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002273 ol_txrx_hl_tdls_flag_reset((struct cdp_vdev *)vdev, false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302274
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002275#ifdef QCA_IBSS_SUPPORT
2276 vdev->ibss_peer_num = 0;
2277 vdev->ibss_peer_heart_beat_timer = 0;
2278#endif
2279
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302280 ol_txrx_vdev_txqs_init(vdev);
2281
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302282 qdf_spinlock_create(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002283 vdev->ll_pause.paused_reason = 0;
2284 vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
2285 vdev->ll_pause.txq.depth = 0;
wadesong5e2e8012017-08-21 16:56:03 +08002286 qdf_atomic_init(&vdev->delete.detaching);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302287 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002288 &vdev->ll_pause.timer,
2289 ol_tx_vdev_ll_pause_queue_send, vdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302290 QDF_TIMER_TYPE_SW);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302291 qdf_atomic_init(&vdev->os_q_paused);
2292 qdf_atomic_set(&vdev->os_q_paused, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002293 vdev->tx_fl_lwm = 0;
2294 vdev->tx_fl_hwm = 0;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002295 vdev->rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002296 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Abhishek Singh217d9782017-04-28 23:49:11 +05302297 qdf_mem_zero(&vdev->last_peer_mac_addr,
2298 sizeof(union ol_txrx_align_mac_addr_t));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302299 qdf_spinlock_create(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002300 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08002301 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002302 vdev->osif_fc_ctx = NULL;
2303
Alok Kumar75355aa2018-03-19 17:32:58 +05302304 vdev->txrx_stats.txack_success = 0;
2305 vdev->txrx_stats.txack_failed = 0;
2306
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002307 /* Default MAX Q depth for every VDEV */
2308 vdev->ll_pause.max_q_depth =
2309 ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002310 qdf_status = qdf_event_create(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002311 /* add this vdev into the pdev's list */
2312 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
2313
Poddar, Siddarth14521792017-03-14 21:19:42 +05302314 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002315 "Created vdev %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002316 vdev,
2317 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2318 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2319 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2320
2321 /*
2322 * We've verified that htt_op_mode == wlan_op_mode,
2323 * so no translation is needed.
2324 */
2325 htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
2326
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002327 return (struct cdp_vdev *)vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002328}
2329
Dhanashri Atre12a08392016-02-17 13:10:34 -08002330/**
2331 * ol_txrx_vdev_register - Link a vdev's data object with the
2332 * matching OS shim vdev object.
2333 *
2334 * @txrx_vdev: the virtual device's data object
2335 * @osif_vdev: the virtual device's OS shim object
2336 * @txrx_ops: (pointers to)functions used for tx and rx data xfer
2337 *
2338 * The data object for a virtual device is created by the
2339 * function ol_txrx_vdev_attach. However, rather than fully
2340 * linking the data vdev object with the vdev objects from the
2341 * other subsystems that the data vdev object interacts with,
2342 * the txrx_vdev_attach function focuses primarily on creating
2343 * the data vdev object. After the creation of both the data
2344 * vdev object and the OS shim vdev object, this
2345 * ol_txrx_vdev_register function is used to connect the two
2346 * vdev objects, so the data SW can use the OS shim vdev handle
2347 * when passing rx data received by a vdev up to the OS shim.
2348 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002349static void ol_txrx_vdev_register(struct cdp_vdev *pvdev,
2350 void *osif_vdev,
2351 struct ol_txrx_ops *txrx_ops)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002352{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002353 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002354
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002355 if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
2356 qdf_print("%s: vdev/txrx_ops is NULL!\n", __func__);
2357 qdf_assert(0);
2358 return;
2359 }
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002360
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002361 vdev->osif_dev = osif_vdev;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002362 vdev->rx = txrx_ops->rx.rx;
Poddar, Siddarth3906e172018-01-09 11:24:58 +05302363 vdev->stats_rx = txrx_ops->rx.stats_rx;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002364 txrx_ops->tx.tx = ol_tx_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002365}
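/*
 * Usage sketch for the registration above (illustrative only; the shim
 * object and callback names below are hypothetical, not part of this file):
 *
 *	struct ol_txrx_ops ops = { 0 };
 *
 *	ops.rx.rx = shim_rx_deliver;
 *	ol_txrx_vdev_register((struct cdp_vdev *)vdev, shim_adapter, &ops);
 *
 * On return, ops.tx.tx has been filled in with ol_tx_data, which the shim
 * can then use to hand tx frames to the data path.
 */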
2366
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002367#ifdef currently_unused
Dhanashri Atre12a08392016-02-17 13:10:34 -08002368/**
2369 * ol_txrx_set_curchan - Setup the current operating channel of
2370 * the device
2371 * @pdev - the data physical device object
2372 * @chan_mhz - the channel frequency (mhz) packets on
2373 *
2374 * Mainly used when populating monitor mode status that requires
2375 * the current operating channel
2376 *
2377 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002378void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz)
2379{
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002380}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002381#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002382
2383void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
2384{
2385 vdev->safemode = val;
2386}
2387
Dhanashri Atre12a08392016-02-17 13:10:34 -08002388/**
2389 * ol_txrx_set_privacy_filters - set the privacy filter
2390 * @vdev - the data virtual device object
2391 * @filter - filters to be set
2392 * @num - the number of filters
2393 *
2394 * Rx related. Set the privacy filters. When receiving packets, check
2395 * the ether type, filter type and packet type to decide whether to
2396 * discard these packets.
2397 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002398static void
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002399ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
2400 void *filters, uint32_t num)
2401{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302402 qdf_mem_copy(vdev->privacy_filters, filters,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002403 num * sizeof(struct privacy_exemption));
2404 vdev->num_filters = num;
2405}
2406
2407void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
2408{
2409 vdev->drop_unenc = val;
2410}
2411
gbian016a42e2017-03-01 18:49:11 +08002412#if defined(CONFIG_HL_SUPPORT)
2413
2414static void
2415ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2416{
2417 struct ol_txrx_pdev_t *pdev = vdev->pdev;
2418 int i;
2419 struct ol_tx_desc_t *tx_desc;
2420
2421 qdf_spin_lock_bh(&pdev->tx_mutex);
2422 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2423 tx_desc = ol_tx_desc_find(pdev, i);
2424 if (tx_desc->vdev == vdev)
2425 tx_desc->vdev = NULL;
2426 }
2427 qdf_spin_unlock_bh(&pdev->tx_mutex);
2428}
2429
2430#else
2431
2432static void
2433ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2434{
2435
2436}
2437
2438#endif
2439
Dhanashri Atre12a08392016-02-17 13:10:34 -08002440/**
2441 * ol_txrx_vdev_detach - Deallocate the specified data virtual
2442 * device object.
2443 * @data_vdev: data object for the virtual device in question
2444 * @callback: function to call (if non-NULL) once the vdev has
2445 * been wholly deleted
2446 * @callback_context: context to provide in the callback
2447 *
2448 * All peers associated with the virtual device need to be deleted
2449 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
2450 * However, for the peers to be fully deleted, the peer deletion has to
2451 * percolate through the target data FW and back up to the host data SW.
2452 * Thus, even though the host control SW may have issued a peer_detach
2453 * call for each of the vdev's peers, the peer objects may still be
2454 * allocated, pending removal of all references to them by the target FW.
2455 * In this case, though the vdev_detach function call will still return
2456 * immediately, the vdev itself won't actually be deleted, until the
2457 * deletions of all its peers complete.
2458 * The caller can provide a callback function pointer to be notified when
2459 * the vdev deletion actually happens - whether it's directly within the
2460 * vdev_detach call, or if it's deferred until all in-progress peer
2461 * deletions have completed.
2462 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002463static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002464ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002465 ol_txrx_vdev_delete_cb callback, void *context)
2466{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002467 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
wadesong5e2e8012017-08-21 16:56:03 +08002468 struct ol_txrx_pdev_t *pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002469
2470 /* preconditions */
2471 TXRX_ASSERT2(vdev);
wadesong5e2e8012017-08-21 16:56:03 +08002472 pdev = vdev->pdev;
2473
2474 /* prevent anyone from restarting the ll_pause timer again */
2475 qdf_atomic_set(&vdev->delete.detaching, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002476
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302477 ol_txrx_vdev_tx_queue_free(vdev);
2478
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302479 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302480 qdf_timer_stop(&vdev->ll_pause.timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002481 vdev->ll_pause.is_q_timer_on = false;
2482 while (vdev->ll_pause.txq.head) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302483 qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
Yun Parkeaea8632017-04-09 09:53:45 -07002484
Nirav Shahcbc6d722016-03-01 16:24:53 +05302485 qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
Nirav Shahcbc6d722016-03-01 16:24:53 +05302486 qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002487 vdev->ll_pause.txq.head = next;
2488 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302489 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
wadesong5e2e8012017-08-21 16:56:03 +08002490
2491 /* ll_pause timer should be deleted without any locks held, and
2492 * no timer function should be executed after this point because
2493 * qdf_timer_free is deleting the timer synchronously.
2494 */
2495 qdf_timer_free(&vdev->ll_pause.timer);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302496 qdf_spinlock_destroy(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002497
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302498 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002499 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08002500 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002501 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302502 qdf_spin_unlock_bh(&vdev->flow_control_lock);
2503 qdf_spinlock_destroy(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002504
2505 /* remove the vdev from its parent pdev's list */
2506 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
2507
2508 /*
2509 * Use peer_ref_mutex while accessing peer_list, in case
2510 * a peer is in the process of being removed from the list.
2511 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302512 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002513 /* check that the vdev has no peers allocated */
2514 if (!TAILQ_EMPTY(&vdev->peer_list)) {
2515 /* debug print - will be removed later */
Poddar, Siddarth14521792017-03-14 21:19:42 +05302516 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002517 "%s: not deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002518 __func__, vdev,
2519 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2520 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2521 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2522 /* indicate that the vdev needs to be deleted */
2523 vdev->delete.pending = 1;
2524 vdev->delete.callback = callback;
2525 vdev->delete.context = context;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302526 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002527 return;
2528 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302529 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002530 qdf_event_destroy(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002531
Poddar, Siddarth14521792017-03-14 21:19:42 +05302532 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002533 "%s: deleting vdev obj %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002534 __func__, vdev,
2535 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2536 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2537 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2538
2539 htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
2540
2541 /*
Yun Parkeaea8632017-04-09 09:53:45 -07002542	 * ol_tx_desc_free might access invalid vdev contents through the vdev
2543	 * pointer in a tx desc, since this vdev might be detached
2544	 * asynchronously in another thread.
2545 *
2546 * Go through tx desc pool to set corresponding tx desc's vdev to NULL
2547 * when detach this vdev, and add vdev checking in the ol_tx_desc_free
2548 * to avoid crash.
2549 *
2550 */
gbian016a42e2017-03-01 18:49:11 +08002551 ol_txrx_tx_desc_reset_vdev(vdev);
2552
2553 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002554 * Doesn't matter if there are outstanding tx frames -
2555 * they will be freed once the target sends a tx completion
2556 * message for them.
2557 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302558 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002559 if (callback)
2560 callback(context);
2561}
2562
2563/**
2564 * ol_txrx_flush_rx_frames() - flush cached rx frames
2565 * @peer: peer
2566 * @drop: set flag to drop frames
2567 *
2568 * Return: None
2569 */
2570void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302571 bool drop)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002572{
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002573 struct ol_txrx_cached_bufq_t *bufqi;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002574 struct ol_rx_cached_buf *cache_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302575 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002576 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002577
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302578 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
2579 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002580 return;
2581 }
2582
Dhanashri Atre182b0272016-02-17 15:35:07 -08002583 qdf_assert(peer->vdev);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302584 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002585 bufqi = &peer->bufq_info;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002586
Dhanashri Atre50141c52016-04-07 13:15:29 -07002587 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
Dhanashri Atre182b0272016-02-17 15:35:07 -08002588 data_rx = peer->vdev->rx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002589 else
2590 drop = true;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302591 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002592
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002593 qdf_spin_lock_bh(&bufqi->bufq_lock);
2594 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002595 typeof(*cache_buf), list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002596 while (!list_empty(&bufqi->cached_bufq)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002597 list_del(&cache_buf->list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002598 bufqi->curr--;
2599 qdf_assert(bufqi->curr >= 0);
2600 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002601 if (drop) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302602 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002603 } else {
2604 /* Flush the cached frames to HDD */
Dhanashri Atre182b0272016-02-17 15:35:07 -08002605 ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302606 if (ret != QDF_STATUS_SUCCESS)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302607 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002608 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302609 qdf_mem_free(cache_buf);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002610 qdf_spin_lock_bh(&bufqi->bufq_lock);
2611 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002612 typeof(*cache_buf), list);
2613 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002614 bufqi->qdepth_no_thresh = bufqi->curr;
2615 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302616 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002617}
2618
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07002619static void ol_txrx_flush_cache_rx_queue(void)
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302620{
2621 uint8_t sta_id;
2622 struct ol_txrx_peer_t *peer;
2623 struct ol_txrx_pdev_t *pdev;
2624
2625 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
2626 if (!pdev)
2627 return;
2628
2629 for (sta_id = 0; sta_id < WLAN_MAX_STA_COUNT; sta_id++) {
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002630 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
2631 sta_id);
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302632 if (!peer)
2633 continue;
2634 ol_txrx_flush_rx_frames(peer, 1);
2635 }
2636}
2637
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302638/* Define short name to use in cds_trigger_recovery */
2639#define PEER_DEL_TIMEOUT QDF_PEER_DELETION_TIMEDOUT
2640
Dhanashri Atre12a08392016-02-17 13:10:34 -08002641/**
Naveen Rawat17c42a82018-02-01 19:18:27 -08002642 * ol_txrx_dump_peer_access_list() - dump peer access list
2643 * @peer: peer handle
2644 *
 2646 * This function dumps any peer debug ids that still hold references to the peer
2646 *
2647 * Return: None
2648 */
2649static void ol_txrx_dump_peer_access_list(ol_txrx_peer_handle peer)
2650{
2651 u32 i;
2652 u32 pending_ref;
2653
2654 for (i = 0; i < PEER_DEBUG_ID_MAX; i++) {
2655 pending_ref = qdf_atomic_read(&peer->access_list[i]);
2656 if (pending_ref)
2657 ol_txrx_info_high("id %d pending refs %d",
2658 i, pending_ref);
2659 }
2660}
2661
2662/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08002663 * ol_txrx_peer_attach - Allocate and set up references for a
2664 * data peer object.
 2665 * @pvdev: data virtual device object that will directly
 2666 *	own the data_peer object; the data physical device that
 2667 *	indirectly owns the peer is derived from this vdev
 2668 * @peer_mac_addr: MAC address of the new peer
2670 *
2671 * When an association with a peer starts, the host's control SW
2672 * uses this function to inform the host data SW.
2673 * The host data SW allocates its own peer object, and stores a
2674 * reference to the control peer object within the data peer object.
2675 * The host data SW also stores a reference to the virtual device
2676 * that the peer is associated with. This virtual device handle is
2677 * used when the data SW delivers rx data frames to the OS shim layer.
2678 * The host data SW returns a handle to the new peer data object,
2679 * so a reference within the control peer object can be set to the
2680 * data peer object.
2681 *
2682 * Return: handle to new data peer object, or NULL if the attach
2683 * fails
2684 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002685static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002686ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002687{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002688 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002689 struct ol_txrx_peer_t *peer;
2690 struct ol_txrx_peer_t *temp_peer;
2691 uint8_t i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002692 bool wait_on_deletion = false;
2693 unsigned long rc;
Dhanashri Atre12a08392016-02-17 13:10:34 -08002694 struct ol_txrx_pdev_t *pdev;
Abhishek Singh217d9782017-04-28 23:49:11 +05302695 bool cmp_wait_mac = false;
2696 uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002697
2698 /* preconditions */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002699 TXRX_ASSERT2(vdev);
2700 TXRX_ASSERT2(peer_mac_addr);
2701
Dhanashri Atre12a08392016-02-17 13:10:34 -08002702 pdev = vdev->pdev;
2703 TXRX_ASSERT2(pdev);
2704
Abhishek Singh217d9782017-04-28 23:49:11 +05302705 if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
2706 QDF_MAC_ADDR_SIZE))
2707 cmp_wait_mac = true;
2708
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302709 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002710 /* check for duplicate existing peer */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002711 TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2712 if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
2713 (union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302714 ol_txrx_info_high(
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002715 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exists.\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002716 vdev->vdev_id,
2717 peer_mac_addr[0], peer_mac_addr[1],
2718 peer_mac_addr[2], peer_mac_addr[3],
2719 peer_mac_addr[4], peer_mac_addr[5]);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302720 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002721 vdev->wait_on_peer_id = temp_peer->local_id;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002722 qdf_event_reset(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002723 wait_on_deletion = true;
Abhishek Singh217d9782017-04-28 23:49:11 +05302724 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002725 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302726 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002727 return NULL;
2728 }
2729 }
Abhishek Singh217d9782017-04-28 23:49:11 +05302730 if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
2731 &temp_peer->mac_addr,
2732 &vdev->last_peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302733 ol_txrx_info_high(
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002734 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) old peer exists.\n",
Abhishek Singh217d9782017-04-28 23:49:11 +05302735 vdev->vdev_id,
2736 vdev->last_peer_mac_addr.raw[0],
2737 vdev->last_peer_mac_addr.raw[1],
2738 vdev->last_peer_mac_addr.raw[2],
2739 vdev->last_peer_mac_addr.raw[3],
2740 vdev->last_peer_mac_addr.raw[4],
2741 vdev->last_peer_mac_addr.raw[5]);
2742 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2743 vdev->wait_on_peer_id = temp_peer->local_id;
2744 qdf_event_reset(&vdev->wait_delete_comp);
2745 wait_on_deletion = true;
2746 break;
2747 } else {
2748 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2749 ol_txrx_err("peer not found");
2750 return NULL;
2751 }
2752 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002753 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302754 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002755
Abhishek Singh217d9782017-04-28 23:49:11 +05302756 qdf_mem_zero(&vdev->last_peer_mac_addr,
2757 sizeof(union ol_txrx_align_mac_addr_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002758 if (wait_on_deletion) {
2759 /* wait for peer deletion */
Nachiket Kukade0396b732017-11-14 16:35:16 +05302760 rc = qdf_wait_for_event_completion(&vdev->wait_delete_comp,
Prakash Manjunathappad3ccca22016-05-05 19:23:19 -07002761 PEER_DELETION_TIMEOUT);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002762 if (QDF_STATUS_SUCCESS != rc) {
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002763 ol_txrx_err("error waiting for peer_id(%d) deletion, status %d\n",
Dustin Brown100201e2017-07-10 11:48:40 -07002764 vdev->wait_on_peer_id, (int) rc);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002765 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08002766 ol_txrx_dump_peer_access_list(temp_peer);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002767 wlan_roam_debug_dump_table();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002768 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Dustin Brown100201e2017-07-10 11:48:40 -07002769
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002770 return NULL;
2771 }
2772 }
2773
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302774 peer = qdf_mem_malloc(sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002775 if (!peer)
2776 return NULL; /* failure */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002777
2778 /* store provided params */
2779 peer->vdev = vdev;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302780 qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002781 OL_TXRX_MAC_ADDR_LEN);
2782
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302783 ol_txrx_peer_txqs_init(pdev, peer);
2784
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002785 INIT_LIST_HEAD(&peer->bufq_info.cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302786 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002787 /* add this peer into the vdev's list */
2788 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302789 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002790 /* check whether this is a real peer (peer mac addr != vdev mac addr) */
Frank Liu4362e462018-01-16 11:51:55 +08002791 if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr)) {
2792 qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002793 vdev->last_real_peer = peer;
Frank Liu4362e462018-01-16 11:51:55 +08002794 qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
2795 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002796
2797 peer->rx_opt_proc = pdev->rx_opt_proc;
2798
2799 ol_rx_peer_init(pdev, peer);
2800
2801 /* initialize the peer_id */
2802 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2803 peer->peer_ids[i] = HTT_INVALID_PEER;
2804
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302805 qdf_spinlock_create(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002806 qdf_spinlock_create(&peer->bufq_info.bufq_lock);
2807
2808 peer->bufq_info.thresh = OL_TXRX_CACHED_BUFQ_THRESH;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002809
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302810 qdf_atomic_init(&peer->delete_in_progress);
2811 qdf_atomic_init(&peer->flush_in_progress);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302812 qdf_atomic_init(&peer->ref_cnt);
Mohit Khannab7bec722017-11-10 11:43:44 -08002813
2814 for (i = 0; i < PEER_DEBUG_ID_MAX; i++)
2815 qdf_atomic_init(&peer->access_list[i]);
2816
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002817 /* keep one reference for attach */
Mohit Khannab7bec722017-11-10 11:43:44 -08002818 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002819
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002820 /* Set a flag to indicate peer create is pending in firmware */
Prakash Dhavali0d3f1d62016-11-20 23:48:24 -08002821 qdf_atomic_init(&peer->fw_create_pending);
2822 qdf_atomic_set(&peer->fw_create_pending, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002823
2824 peer->valid = 1;
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07002825 qdf_timer_init(pdev->osdev, &peer->peer_unmap_timer,
2826 peer_unmap_timer_handler, peer, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002827
2828 ol_txrx_peer_find_hash_add(pdev, peer);
2829
Mohit Khanna47384bc2016-08-15 15:37:05 -07002830 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002831 "vdev %pK created peer %pK ref_cnt %d (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Mohit Khanna47384bc2016-08-15 15:37:05 -07002832 vdev, peer, qdf_atomic_read(&peer->ref_cnt),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002833 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2834 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2835 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2836 /*
 2837	 * For every peer MAP message, check and set bss_peer
2838 */
Ankit Guptaa5076012016-09-14 11:32:19 -07002839 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
2840 OL_TXRX_MAC_ADDR_LEN))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002841 peer->bss_peer = 1;
2842
2843 /*
2844 * The peer starts in the "disc" state while association is in progress.
2845 * Once association completes, the peer will get updated to "auth" state
2846 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2847 * or else to the "conn" state. For non-open mode, the peer will
2848 * progress to "auth" state once the authentication completes.
2849 */
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002850 peer->state = OL_TXRX_PEER_STATE_INVALID;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002851 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002852 OL_TXRX_PEER_STATE_DISC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002853
2854#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2855 peer->rssi_dbm = HTT_RSSI_INVALID;
2856#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002857 if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2858 !pdev->self_peer) {
2859 pdev->self_peer = peer;
2860 /*
2861 * No Tx in monitor mode, otherwise results in target assert.
2862 * Setting disable_intrabss_fwd to true
2863 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002864 ol_vdev_rx_set_intrabss_fwd((struct cdp_vdev *)vdev, true);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002865 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002866
2867 ol_txrx_local_peer_id_alloc(pdev, peer);
2868
Leo Chang98726762016-10-28 11:07:18 -07002869 return (void *)peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002870}
2871
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302872#undef PEER_DEL_TIMEOUT
2873
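/*
 * Illustrative usage sketch (not part of the driver build): this shows how
 * a control-path caller might attach a data peer once association starts
 * and then move it through the state machine described above.  The pvdev,
 * ppdev and peer_mac variables are assumed placeholders for handles the
 * caller already holds.
 *
 *	void *peer;
 *
 *	peer = ol_txrx_peer_attach(pvdev, peer_mac);
 *	if (!peer)
 *		return QDF_STATUS_E_FAILURE;
 *	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_CONN);
 */
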
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002874/*
2875 * Discarding tx filter - removes all data frames (disconnected state)
2876 */
2877static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2878{
2879 return A_ERROR;
2880}
2881
2882/*
 2883 * Non-authentication tx filter - filters out data frames that are not
2884 * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2885 * data frames (connected state)
2886 */
2887static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2888{
2889 return
2890 (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2891 tx_msdu_info->htt.info.ethertype ==
2892 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2893}
2894
2895/*
2896 * Pass-through tx filter - lets all data frames through (authenticated state)
2897 */
2898static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2899{
2900 return A_OK;
2901}
2902
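/*
 * Illustrative sketch (assumed tx-path call site, placeholder names): the
 * filter selected by ol_txrx_peer_state_update() below is consulted per
 * MSDU, so in the "conn" state only EAPOL/WAPI frames get through:
 *
 *	if (peer->tx_filter(&tx_msdu_info) != A_OK)
 *		return;		(discard the frame)
 */
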
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002903/**
2904 * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2905 * @peer: handle to peer
2906 *
 2907 * Returns the MAC address for modules that do not know the peer type
2908 *
2909 * Return: the mac_addr from peer
2910 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002911static uint8_t *
Leo Chang98726762016-10-28 11:07:18 -07002912ol_txrx_peer_get_peer_mac_addr(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002913{
Leo Chang98726762016-10-28 11:07:18 -07002914 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002915
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002916 if (!peer)
2917 return NULL;
2918
2919 return peer->mac_addr.raw;
2920}
2921
Abhishek Singhcfb44482017-03-10 12:42:37 +05302922#ifdef WLAN_FEATURE_11W
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002923/**
2924 * ol_txrx_get_pn_info() - Returns pn info from peer
2925 * @peer: handle to peer
2926 * @last_pn_valid: return last_rmf_pn_valid value from peer.
2927 * @last_pn: return last_rmf_pn value from peer.
2928 * @rmf_pn_replays: return rmf_pn_replays value from peer.
2929 *
2930 * Return: NONE
2931 */
2932void
Leo Chang98726762016-10-28 11:07:18 -07002933ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002934 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2935{
Leo Chang98726762016-10-28 11:07:18 -07002936 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002937 *last_pn_valid = &peer->last_rmf_pn_valid;
2938 *last_pn = &peer->last_rmf_pn;
2939 *rmf_pn_replays = &peer->rmf_pn_replays;
2940}
Abhishek Singhcfb44482017-03-10 12:42:37 +05302941#else
2942void
2943ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
2944 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2945{
2946}
2947#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002948
2949/**
2950 * ol_txrx_get_opmode() - Return operation mode of vdev
2951 * @vdev: vdev handle
2952 *
2953 * Return: operation mode.
2954 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002955static int ol_txrx_get_opmode(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002956{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002957 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002958
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002959 return vdev->opmode;
2960}
2961
2962/**
2963 * ol_txrx_get_peer_state() - Return peer state of peer
2964 * @peer: peer handle
2965 *
 2966 * Return: peer state
2967 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002968static int ol_txrx_get_peer_state(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002969{
Leo Chang98726762016-10-28 11:07:18 -07002970 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002971
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002972 return peer->state;
2973}
2974
2975/**
2976 * ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
2977 * @peer: peer handle
2978 *
2979 * Return: vdev handle from peer
2980 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002981static struct cdp_vdev *ol_txrx_get_vdev_for_peer(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002982{
Leo Chang98726762016-10-28 11:07:18 -07002983 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002984
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002985 return (struct cdp_vdev *)peer->vdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002986}
2987
2988/**
2989 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
2990 * @vdev: vdev handle
2991 *
2992 * Return: vdev mac address
2993 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002994static uint8_t *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002995ol_txrx_get_vdev_mac_addr(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002996{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002997 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002998
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002999 if (!vdev)
3000 return NULL;
3001
3002 return vdev->mac_addr.raw;
3003}
3004
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003005#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003006/**
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003007 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003008 * vdev
3009 * @vdev: vdev handle
3010 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003011 * Return: Handle to struct qdf_mac_addr
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003012 */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003013struct qdf_mac_addr *
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003014ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
3015{
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003016 return (struct qdf_mac_addr *)&(vdev->mac_addr);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003017}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003018#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003019
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003020#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003021/**
3022 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
3023 * @vdev: vdev handle
3024 *
3025 * Return: Handle to pdev
3026 */
3027ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
3028{
3029 return vdev->pdev;
3030}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003031#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003032
3033/**
3034 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
3035 * @vdev: vdev handle
3036 *
3037 * Return: Handle to control pdev
3038 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003039static struct cdp_cfg *
3040ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003041{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003042 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003043
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003044 return vdev->pdev->ctrl_pdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003045}
3046
3047/**
3048 * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
3049 * @vdev: vdev handle
3050 *
3051 * Return: Rx Fwd disabled status
3052 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003053static uint8_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003054ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003055{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003056 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003057 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
3058 vdev->pdev->ctrl_pdev;
3059 return cfg->rx_fwd_disabled;
3060}
3061
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07003062#ifdef QCA_IBSS_SUPPORT
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003063/**
3064 * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
3065 * @vdev: vdev handle
 3066 * @peer_num_delta: adjustment to the number of peers
 3067 *
 3068 * Return: OL_TXRX_INVALID_NUM_PEERS on failure, or the total peer count after adjustment.
3069 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003070static int16_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003071ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003072 int16_t peer_num_delta)
3073{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003074 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003075 int16_t new_peer_num;
3076
3077 new_peer_num = vdev->ibss_peer_num + peer_num_delta;
Naveen Rawatc45d1622016-07-05 12:20:09 -07003078 if (new_peer_num > MAX_PEERS || new_peer_num < 0)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003079 return OL_TXRX_INVALID_NUM_PEERS;
3080
3081 vdev->ibss_peer_num = new_peer_num;
3082
3083 return new_peer_num;
3084}
3085
3086/**
3087 * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
3088 * beat timer
3089 * @vdev: vdev handle
3090 * @timer_value_sec: new heart beat timer value
3091 *
3092 * Return: Old timer value set in vdev.
3093 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003094static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
3095 uint16_t timer_value_sec)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003096{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003097 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003098 uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
3099
3100 vdev->ibss_peer_heart_beat_timer = timer_value_sec;
3101
3102 return old_timer_value;
3103}
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07003104#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003105
3106/**
3107 * ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
3108 * @vdev: vdev handle
3109 * @callback: callback function to remove the peer.
3110 * @callback_context: handle for callback function
 3111 * @remove_last_peer: whether the last (bss) peer should also be removed
3112 *
3113 * Return: NONE
3114 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003115static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003116ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003117 ol_txrx_vdev_peer_remove_cb callback,
3118 void *callback_context, bool remove_last_peer)
3119{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003120 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003121 ol_txrx_peer_handle peer, temp;
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07003122 int self_removed = 0;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003123 /* remove all remote peers for vdev */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003124 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003125
3126 temp = NULL;
3127 TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
3128 peer_list_elem) {
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05303129 if (qdf_atomic_read(&peer->delete_in_progress))
3130 continue;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003131 if (temp) {
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003132 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05303133 callback(callback_context, temp->mac_addr.raw,
Jiachao Wu641760e2018-01-21 12:11:31 +08003134 vdev->vdev_id, temp);
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003135 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003136 }
3137 /* self peer is deleted last */
3138 if (peer == TAILQ_FIRST(&vdev->peer_list)) {
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07003139 self_removed = 1;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003140 break;
Yun Parkeaea8632017-04-09 09:53:45 -07003141 }
3142 temp = peer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003143 }
3144
Mohit Khanna137b97d2016-04-21 16:11:33 -07003145 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
3146
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07003147 if (self_removed)
3148 ol_txrx_info("%s: self peer removed by caller ",
3149 __func__);
3150
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003151 if (remove_last_peer) {
3152 /* remove IBSS bss peer last */
3153 peer = TAILQ_FIRST(&vdev->peer_list);
3154 callback(callback_context, (uint8_t *) &vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08003155 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003156 }
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003157}
3158
3159/**
3160 * ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
3161 * @vdev: vdev handle
3162 * @callback: callback function to remove the peer.
3163 * @callback_context: handle for callback function
3164 *
3165 * Return: NONE
3166 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003167static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003168ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003169 ol_txrx_vdev_peer_remove_cb callback,
3170 void *callback_context)
3171{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003172 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003173 ol_txrx_peer_handle peer = NULL;
Jiachao Wu641760e2018-01-21 12:11:31 +08003174 ol_txrx_peer_handle tmp_peer = NULL;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003175
Jiachao Wu641760e2018-01-21 12:11:31 +08003176 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, peer_list_elem, tmp_peer) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05303177 ol_txrx_info_high(
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003178 "%s: peer found for vdev id %d. deleting the peer",
3179 __func__, vdev->vdev_id);
3180 callback(callback_context, (uint8_t *)&vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08003181 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003182 }
3183}
3184
3185/**
3186 * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
3187 * @vdev: vdev handle
3188 * @ocb_set_chan: OCB channel information to be set in vdev.
3189 *
3190 * Return: NONE
3191 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003192static void ol_txrx_set_ocb_chan_info(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003193 struct ol_txrx_ocb_set_chan ocb_set_chan)
3194{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003195 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003196
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003197 vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
3198 vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
3199}
3200
3201/**
3202 * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
3203 * @vdev: vdev handle
3204 *
3205 * Return: handle to struct ol_txrx_ocb_chan_info
3206 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003207static struct ol_txrx_ocb_chan_info *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003208ol_txrx_get_ocb_chan_info(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003209{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003210 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003211
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003212 return vdev->ocb_channel_info;
3213}
3214
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003215/**
3216 * @brief specify the peer's authentication state
3217 * @details
3218 * Specify the peer's authentication state (none, connected, authenticated)
3219 * to allow the data SW to determine whether to filter out invalid data frames.
3220 * (In the "connected" state, where security is enabled, but authentication
3221 * has not completed, tx and rx data frames other than EAPOL or WAPI should
3222 * be discarded.)
 3223 * This function is only relevant for systems in which the tx and rx filtering
 3224 * is done in the host rather than in the target.
 3225 *
 3226 * @param ppdev - data physical device handle
 3227 * @param peer_mac - MAC address of the peer whose state has changed
 * @param state - the new state of the peer
3228 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003229 * Return: QDF Status
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003230 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003231QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003232 uint8_t *peer_mac,
3233 enum ol_txrx_peer_state state)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003234{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003235 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003236 struct ol_txrx_peer_t *peer;
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003237 int peer_ref_cnt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003238
Anurag Chouhanc5548422016-02-24 18:33:27 +05303239 if (qdf_unlikely(!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303240 ol_txrx_err("Pdev is NULL");
Anurag Chouhanc5548422016-02-24 18:33:27 +05303241 qdf_assert(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303242 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003243 }
3244
Mohit Khannab7bec722017-11-10 11:43:44 -08003245 peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
3246 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003247 if (NULL == peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303248 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303249 "%s: peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
3250 __func__,
3251 peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
3252 peer_mac[4], peer_mac[5]);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303253 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003254 }
3255
3256 /* TODO: Should we send WMI command of the connection state? */
3257 /* avoid multiple auth state change. */
3258 if (peer->state == state) {
3259#ifdef TXRX_PRINT_VERBOSE_ENABLE
Poddar, Siddarth14521792017-03-14 21:19:42 +05303260 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003261 "%s: no state change, returns directly\n",
3262 __func__);
3263#endif
Mohit Khannab7bec722017-11-10 11:43:44 -08003264 peer_ref_cnt = ol_txrx_peer_release_ref
3265 (peer,
3266 PEER_DEBUG_ID_OL_INTERNAL);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303267 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003268 }
3269
Poddar, Siddarth14521792017-03-14 21:19:42 +05303270 ol_txrx_dbg("%s: change from %d to %d\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003271 __func__, peer->state, state);
3272
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003273 peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003274 ? ol_tx_filter_pass_thru
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003275 : ((state == OL_TXRX_PEER_STATE_CONN)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003276 ? ol_tx_filter_non_auth
3277 : ol_tx_filter_discard);
3278
3279 if (peer->vdev->pdev->cfg.host_addba) {
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003280 if (state == OL_TXRX_PEER_STATE_AUTH) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003281 int tid;
3282 /*
3283 * Pause all regular (non-extended) TID tx queues until
3284 * data arrives and ADDBA negotiation has completed.
3285 */
Poddar, Siddarth14521792017-03-14 21:19:42 +05303286 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003287 "%s: pause peer and unpause mgmt/non-qos\n",
3288 __func__);
3289 ol_txrx_peer_pause(peer); /* pause all tx queues */
3290 /* unpause mgmt and non-QoS tx queues */
3291 for (tid = OL_TX_NUM_QOS_TIDS;
3292 tid < OL_TX_NUM_TIDS; tid++)
3293 ol_txrx_peer_tid_unpause(peer, tid);
3294 }
3295 }
Mohit Khannab7bec722017-11-10 11:43:44 -08003296 peer_ref_cnt = ol_txrx_peer_release_ref(peer,
3297 PEER_DEBUG_ID_OL_INTERNAL);
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003298 /*
Mohit Khannab7bec722017-11-10 11:43:44 -08003299 * after ol_txrx_peer_release_ref, peer object cannot be accessed
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003300 * if the return code was 0
3301 */
Mohit Khannab04dfcd2017-02-13 18:54:35 -08003302 if (peer_ref_cnt > 0)
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003303 /*
 3304		 * Set the state after the pause to avoid a race condition
 3305		 * with the ADDBA check in the tx path
3306 */
3307 peer->state = state;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303308 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003309}
3310
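/*
 * Illustrative sketch (placeholder ppdev/peer_mac handles): after key
 * installation completes for a secured association, the control path
 * would typically promote the peer so that the pass-through tx filter is
 * selected and data frames are no longer limited to EAPOL/WAPI:
 *
 *	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_AUTH);
 */
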
3311void
3312ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
3313{
3314 peer->keyinstalled = val;
3315}
3316
3317void
3318ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
3319 uint8_t *peer_mac,
3320 union ol_txrx_peer_update_param_t *param,
3321 enum ol_txrx_peer_update_select_t select)
3322{
3323 struct ol_txrx_peer_t *peer;
3324
Mohit Khannab7bec722017-11-10 11:43:44 -08003325 peer = ol_txrx_peer_find_hash_find_get_ref(vdev->pdev, peer_mac, 0, 1,
3326 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003327 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303328 ol_txrx_dbg("%s: peer is null",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003329 __func__);
3330 return;
3331 }
3332
3333 switch (select) {
3334 case ol_txrx_peer_update_qos_capable:
3335 {
 3336		/* Save qos_capable in the txrx peer here;
 3337		 * it is saved again when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO arrives.
3338 */
3339 peer->qos_capable = param->qos_capable;
3340 /*
3341 * The following function call assumes that the peer has a
3342 * single ID. This is currently true, and
3343 * is expected to remain true.
3344 */
3345 htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
3346 peer->peer_ids[0],
3347 peer->qos_capable);
3348 break;
3349 }
3350 case ol_txrx_peer_update_uapsdMask:
3351 {
3352 peer->uapsd_mask = param->uapsd_mask;
3353 htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
3354 peer->peer_ids[0],
3355 peer->uapsd_mask);
3356 break;
3357 }
3358 case ol_txrx_peer_update_peer_security:
3359 {
3360 enum ol_sec_type sec_type = param->sec_type;
3361 enum htt_sec_type peer_sec_type = htt_sec_type_none;
3362
3363 switch (sec_type) {
3364 case ol_sec_type_none:
3365 peer_sec_type = htt_sec_type_none;
3366 break;
3367 case ol_sec_type_wep128:
3368 peer_sec_type = htt_sec_type_wep128;
3369 break;
3370 case ol_sec_type_wep104:
3371 peer_sec_type = htt_sec_type_wep104;
3372 break;
3373 case ol_sec_type_wep40:
3374 peer_sec_type = htt_sec_type_wep40;
3375 break;
3376 case ol_sec_type_tkip:
3377 peer_sec_type = htt_sec_type_tkip;
3378 break;
3379 case ol_sec_type_tkip_nomic:
3380 peer_sec_type = htt_sec_type_tkip_nomic;
3381 break;
3382 case ol_sec_type_aes_ccmp:
3383 peer_sec_type = htt_sec_type_aes_ccmp;
3384 break;
3385 case ol_sec_type_wapi:
3386 peer_sec_type = htt_sec_type_wapi;
3387 break;
3388 default:
3389 peer_sec_type = htt_sec_type_none;
3390 break;
3391 }
3392
3393 peer->security[txrx_sec_ucast].sec_type =
3394 peer->security[txrx_sec_mcast].sec_type =
3395 peer_sec_type;
3396
3397 break;
3398 }
3399 default:
3400 {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303401 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003402 "ERROR: unknown param %d in %s", select,
3403 __func__);
3404 break;
3405 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003406 } /* switch */
Mohit Khannab7bec722017-11-10 11:43:44 -08003407 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003408}
3409
3410uint8_t
3411ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3412{
3413
3414 struct ol_txrx_peer_t *peer;
Yun Parkeaea8632017-04-09 09:53:45 -07003415
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003416 peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3417 if (peer)
3418 return peer->uapsd_mask;
3419 return 0;
3420}
3421
3422uint8_t
3423ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3424{
3425
3426 struct ol_txrx_peer_t *peer_t =
3427 ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3428 if (peer_t != NULL)
3429 return peer_t->qos_capable;
3430 return 0;
3431}
3432
Mohit Khannab7bec722017-11-10 11:43:44 -08003433/**
Mohit Khannab7bec722017-11-10 11:43:44 -08003434 * ol_txrx_peer_free_tids() - free tids for the peer
3435 * @peer: peer handle
3436 *
3437 * Return: None
3438 */
3439static inline void ol_txrx_peer_free_tids(ol_txrx_peer_handle peer)
3440{
3441 int i = 0;
3442 /*
3443 * 'array' is allocated in addba handler and is supposed to be
3444 * freed in delba handler. There is the case (for example, in
3445 * SSR) where delba handler is not called. Because array points
3446 * to address of 'base' by default and is reallocated in addba
3447 * handler later, only free the memory when the array does not
3448 * point to base.
3449 */
3450 for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
3451 if (peer->tids_rx_reorder[i].array !=
3452 &peer->tids_rx_reorder[i].base) {
3453 ol_txrx_dbg(
3454 "%s, delete reorder arr, tid:%d\n",
3455 __func__, i);
3456 qdf_mem_free(peer->tids_rx_reorder[i].array);
3457 ol_rx_reorder_init(&peer->tids_rx_reorder[i],
3458 (uint8_t)i);
3459 }
3460 }
3461}
3462
3463/**
3464 * ol_txrx_peer_release_ref() - release peer reference
 3465 * @peer: peer handle
 * @debug_id: debug id that records which caller took the reference
3466 *
3467 * Release peer reference and delete peer if refcount is 0
3468 *
wadesong9f2b1102017-12-20 22:58:35 +08003469 * Return: Resulting peer ref_cnt after this function is invoked
Mohit Khannab7bec722017-11-10 11:43:44 -08003470 */
3471int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
3472 enum peer_debug_id_type debug_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003473{
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003474 int rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003475 struct ol_txrx_vdev_t *vdev;
3476 struct ol_txrx_pdev_t *pdev;
Jingxiang Ge3badb982018-01-02 17:39:01 +08003477 bool ref_silent = false;
Jingxiang Ge190679b2018-01-30 08:56:19 +08003478 int access_list = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003479
3480 /* preconditions */
3481 TXRX_ASSERT2(peer);
3482
3483 vdev = peer->vdev;
3484 if (NULL == vdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303485 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003486 "The vdev is not present anymore\n");
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003487 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003488 }
3489
3490 pdev = vdev->pdev;
3491 if (NULL == pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303492 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003493 "The pdev is not present anymore\n");
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003494 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003495 }
3496
Mohit Khannab7bec722017-11-10 11:43:44 -08003497 if (debug_id >= PEER_DEBUG_ID_MAX || debug_id < 0) {
3498 ol_txrx_err("incorrect debug_id %d ", debug_id);
3499 return -EINVAL;
3500 }
3501
Jingxiang Ge3badb982018-01-02 17:39:01 +08003502 if (debug_id == PEER_DEBUG_ID_OL_RX_THREAD)
3503 ref_silent = true;
3504
3505 if (!ref_silent)
3506 wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
3507 DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
3508 peer, 0,
3509 qdf_atomic_read(&peer->ref_cnt));
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003510
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003511
3512 /*
3513 * Hold the lock all the way from checking if the peer ref count
3514 * is zero until the peer references are removed from the hash
3515 * table and vdev list (if the peer ref count is zero).
3516 * This protects against a new HL tx operation starting to use the
3517 * peer object just after this function concludes it's done being used.
3518 * Furthermore, the lock needs to be held while checking whether the
3519 * vdev's list of peers is empty, to make sure that list is not modified
3520 * concurrently with the empty check.
3521 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303522 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003523
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003524 /*
3525 * Check for the reference count before deleting the peer
3526 * as we noticed that sometimes we are re-entering this
3527 * function again which is leading to dead-lock.
3528 * (A double-free should never happen, so assert if it does.)
3529 */
3530 rc = qdf_atomic_read(&(peer->ref_cnt));
3531
3532 if (rc == 0) {
3533 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3534 ol_txrx_err("The Peer is not present anymore\n");
3535 qdf_assert(0);
3536 return -EACCES;
3537 }
3538 /*
3539 * now decrement rc; this will be the return code.
3540 * 0 : peer deleted
3541 * >0: peer ref removed, but still has other references
3542 * <0: sanity failed - no changes to the state of the peer
3543 */
3544 rc--;
3545
Mohit Khannab7bec722017-11-10 11:43:44 -08003546 if (!qdf_atomic_read(&peer->access_list[debug_id])) {
3547 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
jitiphil8ad8a6f2018-03-01 23:45:05 +05303548 ol_txrx_err("peer %pK ref was not taken by %d",
Mohit Khannab7bec722017-11-10 11:43:44 -08003549 peer, debug_id);
3550 ol_txrx_dump_peer_access_list(peer);
3551 QDF_BUG(0);
3552 return -EACCES;
3553 }
Mohit Khannab7bec722017-11-10 11:43:44 -08003554 qdf_atomic_dec(&peer->access_list[debug_id]);
3555
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07003556 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
Mohit Khannab7bec722017-11-10 11:43:44 -08003557 u16 peer_id;
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003558 wlan_roam_debug_log(vdev->vdev_id,
3559 DEBUG_DELETING_PEER_OBJ,
3560 DEBUG_INVALID_PEER_ID,
3561 &peer->mac_addr.raw, peer, 0,
3562 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003563 peer_id = peer->local_id;
3564 /* remove the reference to the peer from the hash table */
3565 ol_txrx_peer_find_hash_remove(pdev, peer);
3566
3567 /* remove the peer from its parent vdev's list */
3568 TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
3569
3570 /* cleanup the Rx reorder queues for this peer */
3571 ol_rx_peer_cleanup(vdev, peer);
3572
Jingxiang Ge3badb982018-01-02 17:39:01 +08003573 qdf_spinlock_destroy(&peer->peer_info_lock);
3574 qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
3575
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003576 /* peer is removed from peer_list */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303577 qdf_atomic_set(&peer->delete_in_progress, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003578
3579 /*
3580 * Set wait_delete_comp event if the current peer id matches
3581 * with registered peer id.
3582 */
3583 if (peer_id == vdev->wait_on_peer_id) {
Anurag Chouhance0dc992016-02-16 18:18:03 +05303584 qdf_event_set(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003585 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3586 }
3587
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07003588 qdf_timer_sync_cancel(&peer->peer_unmap_timer);
3589 qdf_timer_free(&peer->peer_unmap_timer);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003590
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003591 /* check whether the parent vdev has no peers left */
3592 if (TAILQ_EMPTY(&vdev->peer_list)) {
3593 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003594 * Check if the parent vdev was waiting for its peers
3595 * to be deleted, in order for it to be deleted too.
3596 */
3597 if (vdev->delete.pending) {
3598 ol_txrx_vdev_delete_cb vdev_delete_cb =
3599 vdev->delete.callback;
3600 void *vdev_delete_context =
3601 vdev->delete.context;
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303602 /*
3603 * Now that there are no references to the peer,
3604 * we can release the peer reference lock.
3605 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303606 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303607
gbian016a42e2017-03-01 18:49:11 +08003608 /*
Yun Parkeaea8632017-04-09 09:53:45 -07003609				 * ol_tx_desc_free might access invalid
 3610				 * vdev content referenced by a tx desc,
 3611				 * since this vdev might be detached
 3612				 * asynchronously in another thread.
 3613				 *
 3614				 * Go through the tx desc pool and set each
 3615				 * corresponding tx desc's vdev to NULL when
 3616				 * detaching this vdev, and add a vdev check
 3617				 * in ol_tx_desc_free to avoid a crash.
3618 */
gbian016a42e2017-03-01 18:49:11 +08003619 ol_txrx_tx_desc_reset_vdev(vdev);
Poddar, Siddarth14521792017-03-14 21:19:42 +05303620 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003621 "%s: deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) - its last peer is done",
Yun Parkeaea8632017-04-09 09:53:45 -07003622 __func__, vdev,
3623 vdev->mac_addr.raw[0],
3624 vdev->mac_addr.raw[1],
3625 vdev->mac_addr.raw[2],
3626 vdev->mac_addr.raw[3],
3627 vdev->mac_addr.raw[4],
3628 vdev->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003629 /* all peers are gone, go ahead and delete it */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303630 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003631 if (vdev_delete_cb)
3632 vdev_delete_cb(vdev_delete_context);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303633 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303634 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003635 }
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303636 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303637 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303638 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003639
jitiphil8ad8a6f2018-03-01 23:45:05 +05303640 ol_txrx_info_high("[%d][%d]: Deleting peer %pK ref_cnt -> %d %s",
Mohit Khannab7bec722017-11-10 11:43:44 -08003641 debug_id,
3642 qdf_atomic_read(&peer->access_list[debug_id]),
3643 peer, rc,
3644 qdf_atomic_read(&peer->fw_create_pending)
3645 == 1 ?
3646 "(No Maps received)" : "");
Mohit Khanna8ee37c62017-08-07 17:15:20 -07003647
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303648 ol_txrx_peer_tx_queue_free(pdev, peer);
3649
Deepak Dhamdhereb0d2dda2017-04-03 01:01:50 -07003650 /* Remove mappings from peer_id to peer object */
3651 ol_txrx_peer_clear_map_peer(pdev, peer);
3652
wadesong9f2b1102017-12-20 22:58:35 +08003653 /* Remove peer pointer from local peer ID map */
3654 ol_txrx_local_peer_id_free(pdev, peer);
3655
Mohit Khannab7bec722017-11-10 11:43:44 -08003656 ol_txrx_peer_free_tids(peer);
3657
3658 ol_txrx_dump_peer_access_list(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003659
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303660 qdf_mem_free(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003661 } else {
Jingxiang Ge190679b2018-01-30 08:56:19 +08003662 access_list = qdf_atomic_read(
3663 &peer->access_list[debug_id]);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303664 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Jingxiang Ge3badb982018-01-02 17:39:01 +08003665 if (!ref_silent)
jitiphil8ad8a6f2018-03-01 23:45:05 +05303666 ol_txrx_info_high("[%d][%d]: ref delete peer %pK ref_cnt -> %d",
Jingxiang Ge3badb982018-01-02 17:39:01 +08003667 debug_id,
Jingxiang Ge190679b2018-01-30 08:56:19 +08003668 access_list,
Jingxiang Ge3badb982018-01-02 17:39:01 +08003669 peer, rc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003670 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003671 return rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003672}
3673
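/*
 * Illustrative sketch (placeholder pdev/peer_mac variables): every
 * reference taken with ol_txrx_peer_get_ref() or
 * ol_txrx_peer_find_hash_find_get_ref() must be dropped with
 * ol_txrx_peer_release_ref() using the same debug id, e.g.:
 *
 *	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
 *						    PEER_DEBUG_ID_OL_INTERNAL);
 *	if (!peer)
 *		return;
 *	(use the peer)
 *	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 */
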
Dhanashri Atre12a08392016-02-17 13:10:34 -08003674/**
Mohit Khanna0696eef2016-04-14 16:14:08 -07003675 * ol_txrx_clear_peer_internal() - ol internal function to clear peer
3676 * @peer: pointer to ol txrx peer structure
3677 *
3678 * Return: QDF Status
3679 */
3680static QDF_STATUS
3681ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
3682{
3683 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3684 /* Drop pending Rx frames in CDS */
3685 if (sched_ctx)
3686 cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3687
3688 /* Purge the cached rx frame queue */
3689 ol_txrx_flush_rx_frames(peer, 1);
3690
3691 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003692 peer->state = OL_TXRX_PEER_STATE_DISC;
3693 qdf_spin_unlock_bh(&peer->peer_info_lock);
3694
3695 return QDF_STATUS_SUCCESS;
3696}
3697
3698/**
3699 * ol_txrx_clear_peer() - clear peer
3700 * @sta_id: sta id
3701 *
3702 * Return: QDF Status
3703 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003704static QDF_STATUS ol_txrx_clear_peer(struct cdp_pdev *ppdev, uint8_t sta_id)
Mohit Khanna0696eef2016-04-14 16:14:08 -07003705{
3706 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003707 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003708
3709 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303710 ol_txrx_err("%s: Unable to find pdev!",
Mohit Khanna0696eef2016-04-14 16:14:08 -07003711 __func__);
3712 return QDF_STATUS_E_FAILURE;
3713 }
3714
3715 if (sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303716 ol_txrx_err("Invalid sta id %d", sta_id);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003717 return QDF_STATUS_E_INVAL;
3718 }
3719
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003720 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Kabilan Kannanfa163982018-01-30 12:03:41 -08003721
3722 /* Return success, if the peer is already cleared by
3723 * data path via peer detach function.
3724 */
Mohit Khanna0696eef2016-04-14 16:14:08 -07003725 if (!peer)
Kabilan Kannanfa163982018-01-30 12:03:41 -08003726 return QDF_STATUS_SUCCESS;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003727
3728 return ol_txrx_clear_peer_internal(peer);
3729
3730}
3731
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003732void peer_unmap_timer_work_function(void *param)
3733{
3734 WMA_LOGE("Enter: %s", __func__);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003735 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08003736 ol_txrx_dump_peer_access_list(param);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003737 wlan_roam_debug_dump_table();
Anurag Chouhan4085ff72017-10-05 18:09:56 +05303738 cds_trigger_recovery(QDF_PEER_UNMAP_TIMEDOUT);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003739}
3740
Mohit Khanna0696eef2016-04-14 16:14:08 -07003741/**
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003742 * peer_unmap_timer_handler() - peer unmap timer function
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003743 * @data: peer object pointer
3744 *
3745 * Return: none
3746 */
3747void peer_unmap_timer_handler(void *data)
3748{
3749 ol_txrx_peer_handle peer = (ol_txrx_peer_handle)data;
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003750 ol_txrx_pdev_handle txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003751
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003752 ol_txrx_err("all unmap events not received for peer %pK, ref_cnt %d",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003753 peer, qdf_atomic_read(&peer->ref_cnt));
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003754 ol_txrx_err("peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003755 peer,
3756 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3757 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3758 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Nachiket Kukadea48fd772017-07-28 18:48:57 +05303759 if (!cds_is_driver_recovering() && !cds_is_fw_down()) {
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003760 qdf_create_work(0, &txrx_pdev->peer_unmap_timer_work,
3761 peer_unmap_timer_work_function,
Naveen Rawat17c42a82018-02-01 19:18:27 -08003762 peer);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003763 qdf_sched_work(0, &txrx_pdev->peer_unmap_timer_work);
Deepak Dhamdhered42ab7c2017-04-13 19:32:16 -07003764 } else {
3765 ol_txrx_err("Recovery is in progress, ignore!");
3766 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003767}
3768
3769
3770/**
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003771 * ol_txrx_peer_detach() - Delete a peer's data object.
 3772 * @ppeer - the peer object to detach
Naveen Rawatf4ada152017-09-05 14:56:12 -07003773 * @bitmap - bitmap indicating special handling of request.
Dhanashri Atre12a08392016-02-17 13:10:34 -08003774 *
3775 * When the host's control SW disassociates a peer, it calls
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003776 * this function to detach and delete the peer. The reference
Dhanashri Atre12a08392016-02-17 13:10:34 -08003777 * stored in the control peer object to the data peer
3778 * object (set up by a call to ol_peer_store()) is provided.
3779 *
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003780 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08003781 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003782static void ol_txrx_peer_detach(void *ppeer, uint32_t bitmap)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003783{
Leo Chang98726762016-10-28 11:07:18 -07003784 ol_txrx_peer_handle peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003785 struct ol_txrx_vdev_t *vdev = peer->vdev;
3786
3787 /* redirect peer's rx delivery function to point to a discard func */
3788 peer->rx_opt_proc = ol_rx_discard;
3789
3790 peer->valid = 0;
3791
Mohit Khanna0696eef2016-04-14 16:14:08 -07003792 /* flush all rx packets before clearing up the peer local_id */
3793 ol_txrx_clear_peer_internal(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003794
3795 /* debug print to dump rx reorder state */
3796 /* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
3797
Poddar, Siddarth14521792017-03-14 21:19:42 +05303798 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003799 "%s:peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003800 __func__, peer,
3801 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3802 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3803 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003804
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303805 qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003806 if (vdev->last_real_peer == peer)
3807 vdev->last_real_peer = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303808 qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003809 htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
3810
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003811 /*
3812 * set delete_in_progress to identify that wma
 3813 * is waiting for an unmap message for this peer
3814 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303815 qdf_atomic_set(&peer->delete_in_progress, 1);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003816
Lin Bai973e6922018-01-08 17:59:19 +08003817 if (!(bitmap & (1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER))) {
Naveen Rawatf4ada152017-09-05 14:56:12 -07003818 if (vdev->opmode == wlan_op_mode_sta) {
3819 qdf_mem_copy(&peer->vdev->last_peer_mac_addr,
3820 &peer->mac_addr,
3821 sizeof(union ol_txrx_align_mac_addr_t));
Abhishek Singh217d9782017-04-28 23:49:11 +05303822
Lin Bai973e6922018-01-08 17:59:19 +08003823 /*
 3824 * Start a timer to track unmap events when the
3825 * sta peer gets deleted.
3826 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003827 qdf_timer_start(&peer->peer_unmap_timer,
3828 OL_TXRX_PEER_UNMAP_TIMEOUT);
Mohit Khannab7bec722017-11-10 11:43:44 -08003829 ol_txrx_info_high
3830 ("started peer_unmap_timer for peer %pK",
3831 peer);
Naveen Rawatf4ada152017-09-05 14:56:12 -07003832 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003833 }
3834
3835 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003836 * Remove the reference added during peer_attach.
3837 * The peer will still be left allocated until the
3838 * PEER_UNMAP message arrives to remove the other
3839 * reference, added by the PEER_MAP message.
3840 */
Mohit Khannab7bec722017-11-10 11:43:44 -08003841 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003842}
3843
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003844/**
3845 * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
Lin Bai973e6922018-01-08 17:59:19 +08003846 * @ppeer - the object to detach
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003847 *
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003848 * Detach a peer and force peer object to be removed. It is called during
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003849 * a roaming scenario when the firmware has already deleted a peer.
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003850 * Remove it from the peer_id_to_object map. Peer object is actually freed
3851 * when last reference is deleted.
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003852 *
3853 * Return: None
3854 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003855static void ol_txrx_peer_detach_force_delete(void *ppeer)
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003856{
Leo Chang98726762016-10-28 11:07:18 -07003857 ol_txrx_peer_handle peer = ppeer;
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003858 ol_txrx_pdev_handle pdev = peer->vdev->pdev;
3859
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003860 ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d",
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003861 __func__, peer, qdf_atomic_read(&peer->ref_cnt));
3862
3863 /* Clear the peer_id_to_obj map entries */
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003864 ol_txrx_peer_remove_obj_map_entries(pdev, peer);
Lin Bai973e6922018-01-08 17:59:19 +08003865 ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003866}
3867
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003868/**
3869 * ol_txrx_dump_tx_desc() - dump tx desc total and free count
 3870 * @pdev_handle: Pointer to txrx pdev
3871 *
3872 * Return: none
3873 */
3874static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3875{
3876 struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003877 uint32_t total, num_free;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003878
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303879 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3880 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3881 else
3882 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003883
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003884 num_free = ol_tx_get_total_free_desc(pdev);
3885
Kapil Gupta53d9b572017-06-28 17:53:25 +05303886 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303887 "total tx credit %d num_free %d",
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003888 total, num_free);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003889
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003890}
3891
3892/**
3893 * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3894 * @timeout: timeout in ms
3895 *
3896 * Wait for tx queue to be empty, return timeout error if
3897 * queue doesn't empty before timeout occurs.
3898 *
3899 * Return:
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303900 * QDF_STATUS_SUCCESS if the queue empties,
3901 * QDF_STATUS_E_TIMEOUT in case of timeout,
3902 * QDF_STATUS_E_FAULT in case of missing handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003903 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003904static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003905{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003906 struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003907
3908 if (txrx_pdev == NULL) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303909 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003910 "%s: txrx context is null", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303911 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003912 }
3913
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003914 while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
Anurag Chouhan512c7d52016-02-19 15:49:46 +05303915 qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003916 if (timeout <= 0) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303917 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303918 "%s: tx frames are pending", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003919 ol_txrx_dump_tx_desc(txrx_pdev);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303920 return QDF_STATUS_E_TIMEOUT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003921 }
3922 timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3923 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303924 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003925}
3926
3927#ifndef QCA_WIFI_3_0_EMU
Himanshu Agarwal83a87572017-05-25 14:09:50 +05303928#define SUSPEND_DRAIN_WAIT 500
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003929#else
3930#define SUSPEND_DRAIN_WAIT 3000
3931#endif
3932
Yue Ma1e11d792016-02-26 18:58:44 -08003933#ifdef FEATURE_RUNTIME_PM
3934/**
3935 * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
 3936 * @ppdev: TXRX pdev context
3937 *
3938 * TXRX is ready to runtime suspend if there are no pending packets
3939 * in the tx queue.
3940 *
3941 * Return: QDF_STATUS
3942 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003943static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003944{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003945 struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07003946
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003947 if (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev))
Yue Ma1e11d792016-02-26 18:58:44 -08003948 return QDF_STATUS_E_BUSY;
3949 else
3950 return QDF_STATUS_SUCCESS;
3951}
3952
3953/**
3954 * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
 3955 * @ppdev: TXRX pdev context
3956 *
3957 * This is a dummy function for symmetry.
3958 *
3959 * Return: QDF_STATUS_SUCCESS
3960 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003961static QDF_STATUS ol_txrx_runtime_resume(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003962{
3963 return QDF_STATUS_SUCCESS;
3964}
3965#endif
3966
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003967/**
3968 * ol_txrx_bus_suspend() - bus suspend
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003969 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003970 *
3971 * Ensure that ol_txrx is ready for bus suspend
3972 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303973 * Return: QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003974 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003975static QDF_STATUS ol_txrx_bus_suspend(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003976{
3977 return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3978}
3979
3980/**
3981 * ol_txrx_bus_resume() - bus resume
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003982 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003983 *
 3984 * Dummy function for symmetry
3985 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303986 * Return: QDF_STATUS_SUCCESS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003987 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003988static QDF_STATUS ol_txrx_bus_resume(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003989{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303990 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003991}
3992
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003993/**
3994 * ol_txrx_get_tx_pending - Get the number of pending transmit
3995 * frames that are awaiting completion.
3996 *
 3997 * @ppdev - the data physical device object
3998 * Mainly used in clean up path to make sure all buffers have been freed
3999 *
4000 * Return: count of pending frames
4001 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004002int ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004003{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004004 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004005 uint32_t total;
4006
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304007 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
4008 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
4009 else
4010 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004011
Nirav Shah55b45a02016-01-21 10:00:16 +05304012 return total - ol_tx_get_total_free_desc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004013}
4014
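/**
 * ol_txrx_discard_tx_pending() - discard all pending tx frames
 * @pdev_handle: txrx pdev handle
 *
 * Discards tx frames still held by HTT, flushes the host-side tx queues
 * into a local list and frees those descriptors with an error status,
 * and finally discards frames already downloaded to the target.
 *
 * Return: none
 */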
4015void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
4016{
4017 ol_tx_desc_list tx_descs;
Yun Parkeaea8632017-04-09 09:53:45 -07004018 /*
4019 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05304020 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Yun Parkeaea8632017-04-09 09:53:45 -07004021	 * which is the same as the normal data send completion path
4022 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004023 htt_tx_pending_discard(pdev_handle->htt_pdev);
4024
4025 TAILQ_INIT(&tx_descs);
4026 ol_tx_queue_discard(pdev_handle, true, &tx_descs);
4027 /* Discard Frames in Discard List */
4028 ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
4029
4030 ol_tx_discard_target_frms(pdev_handle);
4031}
4032
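/*
 * The firmware stats request/response exchange identifies each request by
 * a 64-bit cookie. The helpers below simply encode the address of the
 * non-volatile request object as that cookie and decode it again when the
 * response arrives.
 */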
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004033static inline
4034uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
4035{
4036 return (uint64_t) ((size_t) req);
4037}
4038
4039static inline
4040struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
4041{
4042 return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
4043}
4044
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004045#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004046void
4047ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
4048 uint8_t cfg_stats_type, uint32_t cfg_val)
4049{
4050 uint64_t dummy_cookie = 0;
Yun Parkeaea8632017-04-09 09:53:45 -07004051
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004052 htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
4053 0 /* reset mask */,
4054 cfg_stats_type, cfg_val, dummy_cookie);
4055}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004056#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004057
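/**
 * ol_txrx_fw_stats_get() - request debug stats from the firmware
 * @pvdev: virtual device handle
 * @req: caller's stats request specification
 * @per_vdev: per-vdev stats request flag
 * @response_expected: whether the caller will consume a response
 *
 * Copies the caller's (possibly stack-allocated) request into a
 * non-volatile object, optionally queues it on the pdev request list,
 * and sends an HTT debug stats request using the object's address as
 * the cookie.
 *
 * Return: A_OK on success, A_NO_MEMORY or A_ERROR on failure
 */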
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004058static A_STATUS
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004059ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
Dhanashri Atre52f71332016-08-22 12:12:36 -07004060 bool per_vdev, bool response_expected)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004061{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004062 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004063 struct ol_txrx_pdev_t *pdev = vdev->pdev;
4064 uint64_t cookie;
4065 struct ol_txrx_stats_req_internal *non_volatile_req;
4066
4067 if (!pdev ||
4068 req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
4069 req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
4070 return A_ERROR;
4071 }
4072
4073 /*
4074 * Allocate a non-transient stats request object.
4075 * (The one provided as an argument is likely allocated on the stack.)
4076 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304077 non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004078 if (!non_volatile_req)
4079 return A_NO_MEMORY;
4080
4081 /* copy the caller's specifications */
4082 non_volatile_req->base = *req;
4083 non_volatile_req->serviced = 0;
4084 non_volatile_req->offset = 0;
4085
4086 /* use the non-volatile request object's address as the cookie */
4087 cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
4088
tfyu9fcabd72017-09-26 17:46:48 +08004089 if (response_expected) {
4090 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4091 TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
4092 pdev->req_list_depth++;
4093 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4094 }
4095
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004096 if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
4097 req->stats_type_upload_mask,
4098 req->stats_type_reset_mask,
4099 HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
4100 cookie)) {
tfyu9fcabd72017-09-26 17:46:48 +08004101 if (response_expected) {
4102 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4103 TAILQ_REMOVE(&pdev->req_list, non_volatile_req, req_list_elem);
4104 pdev->req_list_depth--;
4105 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4106 }
4107
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304108 qdf_mem_free(non_volatile_req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004109 return A_ERROR;
4110 }
4111
Nirav Shahd2310422016-01-21 18:58:06 +05304112 if (response_expected == false)
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304113 qdf_mem_free(non_volatile_req);
Nirav Shahd2310422016-01-21 18:58:06 +05304114
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004115 return A_OK;
4116}
Dhanashri Atre12a08392016-02-17 13:10:34 -08004117
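/**
 * ol_txrx_fw_stats_handler() - process a firmware stats response
 * @pdev: txrx pdev handle
 * @cookie: cookie carried in the response; encodes the request pointer
 * @stats_info_list: series of stats records from the firmware
 *
 * Looks up the originating request on the pdev request list, walks the
 * stats records, optionally prints them and copies them into the
 * requestor's buffer, invokes the completion callback, and frees the
 * request once the final record of the series has been handled.
 *
 * Return: none
 */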
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004118void
4119ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
4120 uint64_t cookie, uint8_t *stats_info_list)
4121{
4122 enum htt_dbg_stats_type type;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07004123 enum htt_cmn_dbg_stats_type cmn_type = HTT_DBG_CMN_NUM_STATS_INVALID;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004124 enum htt_dbg_stats_status status;
4125 int length;
4126 uint8_t *stats_data;
tfyu9fcabd72017-09-26 17:46:48 +08004127 struct ol_txrx_stats_req_internal *req, *tmp;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004128 int more = 0;
tfyu9fcabd72017-09-26 17:46:48 +08004129 int found = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004130
4131 req = ol_txrx_u64_to_stats_ptr(cookie);
4132
tfyu9fcabd72017-09-26 17:46:48 +08004133 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4134 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4135 if (req == tmp) {
4136 found = 1;
4137 break;
4138 }
4139 }
4140 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4141
4142 if (!found) {
4143 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05304144 "req(%pK) from firmware can't be found in the list\n", req);
tfyu9fcabd72017-09-26 17:46:48 +08004145 return;
4146 }
4147
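	/*
	 * Walk the series of stats records: parse each header, copy the
	 * payload (bounded by the caller's byte limit) into the request
	 * buffer, and stop when the SERIES_DONE marker is reached.
	 */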
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004148 do {
4149 htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
4150 &length, &stats_data);
4151 if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
4152 break;
4153 if (status == HTT_DBG_STATS_STATUS_PRESENT ||
4154 status == HTT_DBG_STATS_STATUS_PARTIAL) {
4155 uint8_t *buf;
4156 int bytes = 0;
4157
4158 if (status == HTT_DBG_STATS_STATUS_PARTIAL)
4159 more = 1;
4160 if (req->base.print.verbose || req->base.print.concise)
4161 /* provide the header along with the data */
4162 htt_t2h_stats_print(stats_info_list,
4163 req->base.print.concise);
4164
4165 switch (type) {
4166 case HTT_DBG_STATS_WAL_PDEV_TXRX:
4167 bytes = sizeof(struct wlan_dbg_stats);
4168 if (req->base.copy.buf) {
4169 int lmt;
4170
4171 lmt = sizeof(struct wlan_dbg_stats);
4172 if (req->base.copy.byte_limit < lmt)
4173 lmt = req->base.copy.byte_limit;
4174 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304175 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004176 }
4177 break;
4178 case HTT_DBG_STATS_RX_REORDER:
4179 bytes = sizeof(struct rx_reorder_stats);
4180 if (req->base.copy.buf) {
4181 int lmt;
4182
4183 lmt = sizeof(struct rx_reorder_stats);
4184 if (req->base.copy.byte_limit < lmt)
4185 lmt = req->base.copy.byte_limit;
4186 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304187 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004188 }
4189 break;
4190 case HTT_DBG_STATS_RX_RATE_INFO:
4191 bytes = sizeof(wlan_dbg_rx_rate_info_t);
4192 if (req->base.copy.buf) {
4193 int lmt;
4194
4195 lmt = sizeof(wlan_dbg_rx_rate_info_t);
4196 if (req->base.copy.byte_limit < lmt)
4197 lmt = req->base.copy.byte_limit;
4198 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304199 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004200 }
4201 break;
4202
4203 case HTT_DBG_STATS_TX_RATE_INFO:
4204 bytes = sizeof(wlan_dbg_tx_rate_info_t);
4205 if (req->base.copy.buf) {
4206 int lmt;
4207
4208 lmt = sizeof(wlan_dbg_tx_rate_info_t);
4209 if (req->base.copy.byte_limit < lmt)
4210 lmt = req->base.copy.byte_limit;
4211 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304212 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004213 }
4214 break;
4215
4216 case HTT_DBG_STATS_TX_PPDU_LOG:
4217 bytes = 0;
4218 /* TO DO: specify how many bytes are present */
4219 /* TO DO: add copying to the requestor's buf */
4220
4221 case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07004222 bytes = sizeof(struct
4223 rx_remote_buffer_mgmt_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004224 if (req->base.copy.buf) {
4225 int limit;
4226
Yun Parkeaea8632017-04-09 09:53:45 -07004227 limit = sizeof(struct
4228 rx_remote_buffer_mgmt_stats);
4229 if (req->base.copy.byte_limit < limit)
4230 limit = req->base.copy.
4231 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004232 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304233 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004234 }
4235 break;
4236
4237 case HTT_DBG_STATS_TXBF_INFO:
4238 bytes = sizeof(struct wlan_dbg_txbf_data_stats);
4239 if (req->base.copy.buf) {
4240 int limit;
4241
Yun Parkeaea8632017-04-09 09:53:45 -07004242 limit = sizeof(struct
4243 wlan_dbg_txbf_data_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004244 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004245 limit = req->base.copy.
4246 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004247 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304248 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004249 }
4250 break;
4251
4252 case HTT_DBG_STATS_SND_INFO:
4253 bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
4254 if (req->base.copy.buf) {
4255 int limit;
4256
Yun Parkeaea8632017-04-09 09:53:45 -07004257 limit = sizeof(struct
4258 wlan_dbg_txbf_snd_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004259 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004260 limit = req->base.copy.
4261 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004262 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304263 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004264 }
4265 break;
4266
4267 case HTT_DBG_STATS_TX_SELFGEN_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07004268 bytes = sizeof(struct
4269 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004270 if (req->base.copy.buf) {
4271 int limit;
4272
Yun Parkeaea8632017-04-09 09:53:45 -07004273 limit = sizeof(struct
4274 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004275 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004276 limit = req->base.copy.
4277 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004278 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304279 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004280 }
4281 break;
4282
4283 case HTT_DBG_STATS_ERROR_INFO:
4284 bytes =
4285 sizeof(struct wlan_dbg_wifi2_error_stats);
4286 if (req->base.copy.buf) {
4287 int limit;
4288
Yun Parkeaea8632017-04-09 09:53:45 -07004289 limit = sizeof(struct
4290 wlan_dbg_wifi2_error_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004291 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004292 limit = req->base.copy.
4293 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004294 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304295 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004296 }
4297 break;
4298
4299 case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
4300 bytes =
4301 sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
4302 if (req->base.copy.buf) {
4303 int limit;
4304
4305 limit = sizeof(struct
4306 rx_txbf_musu_ndpa_pkts_stats);
4307 if (req->base.copy.byte_limit < limit)
4308 limit =
4309 req->base.copy.byte_limit;
4310 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304311 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004312 }
4313 break;
4314
4315 default:
4316 break;
4317 }
Yun Parkeaea8632017-04-09 09:53:45 -07004318 buf = req->base.copy.buf ?
4319 req->base.copy.buf : stats_data;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07004320
4321 /* Not implemented for MCL */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004322 if (req->base.callback.fp)
4323 req->base.callback.fp(req->base.callback.ctxt,
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07004324 cmn_type, buf, bytes);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004325 }
4326 stats_info_list += length;
4327 } while (1);
4328
4329 if (!more) {
tfyu9fcabd72017-09-26 17:46:48 +08004330 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4331 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4332 if (req == tmp) {
4333 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
4334 pdev->req_list_depth--;
4335 qdf_mem_free(req);
4336 break;
4337 }
4338 }
4339 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004340 }
4341}
4342
4343#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
4344int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
4345{
4346 if (debug_specs & TXRX_DBG_MASK_OBJS) {
4347#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4348 ol_txrx_pdev_display(vdev->pdev, 0);
4349#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304350 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304351 "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004352#endif
4353 }
Yun Parkeaea8632017-04-09 09:53:45 -07004354 if (debug_specs & TXRX_DBG_MASK_STATS)
Mohit Khannaca4173b2017-09-12 21:52:19 -07004355 ol_txrx_stats_display(vdev->pdev,
4356 QDF_STATS_VERBOSITY_LEVEL_HIGH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004357 if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
4358#if defined(ENABLE_TXRX_PROT_ANALYZE)
4359 ol_txrx_prot_ans_display(vdev->pdev);
4360#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304361 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304362 "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004363#endif
4364 }
4365 if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
4366#if defined(ENABLE_RX_REORDER_TRACE)
4367 ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
4368#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304369 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304370 "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004371#endif
4372
4373 }
4374 return 0;
4375}
4376#endif
4377
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004378#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004379int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
4380 int max_subfrms_ampdu, int max_subfrms_amsdu)
4381{
4382 return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
4383 max_subfrms_ampdu, max_subfrms_amsdu);
4384}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004385#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004386
4387#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4388void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
4389{
4390 struct ol_txrx_vdev_t *vdev;
4391
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304392 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004393 "%*s%s:\n", indent, " ", "txrx pdev");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304394 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004395 "%*spdev object: %pK", indent + 4, " ", pdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304396 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004397 "%*svdev list:", indent + 4, " ");
4398 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304399 ol_txrx_vdev_display(vdev, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004400 }
4401 ol_txrx_peer_find_display(pdev, indent + 4);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304402 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004403 "%*stx desc pool: %d elems @ %pK", indent + 4, " ",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004404 pdev->tx_desc.pool_size, pdev->tx_desc.array);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304405 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004406 htt_display(pdev->htt_pdev, indent);
4407}
4408
4409void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
4410{
4411 struct ol_txrx_peer_t *peer;
4412
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304413 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004414 "%*stxrx vdev: %pK\n", indent, " ", vdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304415 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004416 "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304417 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004418 "%*sMAC addr: %d:%d:%d:%d:%d:%d",
4419 indent + 4, " ",
4420 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
4421 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
4422 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304423 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004424 "%*speer list:", indent + 4, " ");
4425 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304426 ol_txrx_peer_display(peer, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004427 }
4428}
4429
4430void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
4431{
4432 int i;
4433
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304434 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004435 "%*stxrx peer: %pK", indent, " ", peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004436 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
4437 if (peer->peer_ids[i] != HTT_INVALID_PEER) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304438 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004439 "%*sID: %d", indent + 4, " ",
4440 peer->peer_ids[i]);
4441 }
4442 }
4443}
4444#endif /* TXRX_DEBUG_LEVEL */
4445
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004446/**
4447 * ol_txrx_stats() - update ol layer stats
4448 * @vdev_id: vdev_id
4449 * @buffer: pointer to buffer
4450 * @buf_len: length of the buffer
4451 *
4452 * Return: length of string
4453 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004454static int
Yun Parkeaea8632017-04-09 09:53:45 -07004455ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004456{
4457 uint32_t len = 0;
4458
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004459 struct ol_txrx_vdev_t *vdev =
4460 (struct ol_txrx_vdev_t *)
4461 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004462
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004463 if (!vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304464 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304465 "%s: vdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004466 snprintf(buffer, buf_len, "vdev not found");
4467 return len;
4468 }
4469
4470 len = scnprintf(buffer, buf_len,
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004471 "\n\nTXRX stats:\nllQueue State : %s\npause %u unpause %u\noverflow %u\nllQueue timer state : %s",
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304472 ((vdev->ll_pause.is_q_paused == false) ?
4473 "UNPAUSED" : "PAUSED"),
4474 vdev->ll_pause.q_pause_cnt,
4475 vdev->ll_pause.q_unpause_cnt,
4476 vdev->ll_pause.q_overflow_cnt,
4477 ((vdev->ll_pause.is_q_timer_on == false)
4478 ? "NOT-RUNNING" : "RUNNING"));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004479 return len;
4480}
4481
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004482#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
4483/**
4484 * ol_txrx_disp_peer_cached_bufq_stats() - display peer cached_bufq stats
4485 * @peer: peer pointer
4486 *
4487 * Return: None
4488 */
4489static void ol_txrx_disp_peer_cached_bufq_stats(struct ol_txrx_peer_t *peer)
4490{
Mohit Khannaca4173b2017-09-12 21:52:19 -07004491 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4492 "cached_bufq: curr %d drops %d hwm %d whatifs %d thresh %d",
4493 peer->bufq_info.curr,
4494 peer->bufq_info.dropped,
4495 peer->bufq_info.high_water_mark,
4496 peer->bufq_info.qdepth_no_thresh,
4497 peer->bufq_info.thresh);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004498}
4499
4500/**
4501 * ol_txrx_disp_peer_stats() - display peer stats
4502 * @pdev: pdev pointer
4503 *
4504 * Return: None
4505 */
4506static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
 4507{
	int i;
4508 struct ol_txrx_peer_t *peer;
4509 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
4510
4511 if (osc && hif_is_load_or_unload_in_progress(HIF_GET_SOFTC(osc)))
4512 return;
4513
4514 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
4515 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4516 peer = pdev->local_peer_ids.map[i];
Frank Liu4362e462018-01-16 11:51:55 +08004517 if (peer) {
4518 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khannab7bec722017-11-10 11:43:44 -08004519 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Frank Liu4362e462018-01-16 11:51:55 +08004520 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
4521 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004522 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4523
4524 if (peer) {
4525 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004526 "stats: peer 0x%pK local peer id %d", peer, i);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004527 ol_txrx_disp_peer_cached_bufq_stats(peer);
Mohit Khannab7bec722017-11-10 11:43:44 -08004528 ol_txrx_peer_release_ref(peer,
4529 PEER_DEBUG_ID_OL_INTERNAL);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004530 }
4531 }
4532}
4533#else
4534static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4535{
4536 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Mohit Khannaca4173b2017-09-12 21:52:19 -07004537 "peer stats not supported w/o QCA_SUPPORT_TXRX_LOCAL_PEER_ID");
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004538}
4539#endif
4540
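/**
 * ol_txrx_stats_display() - display txrx path statistics
 * @pdev: txrx pdev handle
 * @level: verbosity level
 *
 * At low verbosity a single condensed line of tx/rx counters is printed;
 * otherwise the full tx and rx path statistics, including the completion
 * and rx-indication histograms and per-peer cached_bufq stats, are dumped.
 *
 * Return: none
 */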
Mohit Khannaca4173b2017-09-12 21:52:19 -07004541void ol_txrx_stats_display(ol_txrx_pdev_handle pdev,
4542 enum qdf_stats_verbosity_level level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004543{
Mohit Khannaca4173b2017-09-12 21:52:19 -07004544 u64 tx_dropped =
4545 pdev->stats.pub.tx.dropped.download_fail.pkts
4546 + pdev->stats.pub.tx.dropped.target_discard.pkts
4547 + pdev->stats.pub.tx.dropped.no_ack.pkts
4548 + pdev->stats.pub.tx.dropped.others.pkts;
4549
4550 if (level == QDF_STATS_VERBOSITY_LEVEL_LOW) {
4551 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4552 "STATS |%u %u|TX: %lld tso %lld ok %lld drops(%u-%lld %u-%lld %u-%lld ?-%lld hR-%lld)|RX: %lld drops(E %lld PI %lld ME %lld) fwd(S %d F %d SF %d)|",
4553 pdev->tx_desc.num_free,
4554 pdev->tx_desc.pool_size,
4555 pdev->stats.pub.tx.from_stack.pkts,
4556 pdev->stats.pub.tx.tso.tso_pkts.pkts,
4557 pdev->stats.pub.tx.delivered.pkts,
4558 htt_tx_status_download_fail,
4559 pdev->stats.pub.tx.dropped.download_fail.pkts,
4560 htt_tx_status_discard,
4561 pdev->stats.pub.tx.dropped.target_discard.pkts,
4562 htt_tx_status_no_ack,
4563 pdev->stats.pub.tx.dropped.no_ack.pkts,
4564 pdev->stats.pub.tx.dropped.others.pkts,
4565 pdev->stats.pub.tx.dropped.host_reject.pkts,
4566 pdev->stats.pub.rx.delivered.pkts,
4567 pdev->stats.pub.rx.dropped_err.pkts,
4568 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4569 pdev->stats.pub.rx.dropped_mic_err.pkts,
4570 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4571 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4572 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
4573 return;
4574 }
4575
4576 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304577 "TX PATH Statistics:");
Mohit Khannaca4173b2017-09-12 21:52:19 -07004578 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shahda008342016-05-17 18:50:40 +05304579 "sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
4580 pdev->stats.pub.tx.from_stack.pkts,
4581 pdev->stats.pub.tx.from_stack.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004582 pdev->stats.pub.tx.dropped.host_reject.pkts,
4583 pdev->stats.pub.tx.dropped.host_reject.bytes,
Mohit Khannaca4173b2017-09-12 21:52:19 -07004584 tx_dropped,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004585 pdev->stats.pub.tx.dropped.download_fail.bytes
4586 + pdev->stats.pub.tx.dropped.target_discard.bytes
4587 + pdev->stats.pub.tx.dropped.no_ack.bytes);
Mohit Khannaca4173b2017-09-12 21:52:19 -07004588 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4589 "successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B) others: %lld (%lld B)",
Nirav Shahda008342016-05-17 18:50:40 +05304590 pdev->stats.pub.tx.delivered.pkts,
4591 pdev->stats.pub.tx.delivered.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004592 pdev->stats.pub.tx.dropped.download_fail.pkts,
4593 pdev->stats.pub.tx.dropped.download_fail.bytes,
4594 pdev->stats.pub.tx.dropped.target_discard.pkts,
4595 pdev->stats.pub.tx.dropped.target_discard.bytes,
4596 pdev->stats.pub.tx.dropped.no_ack.pkts,
Mohit Khannaca4173b2017-09-12 21:52:19 -07004597 pdev->stats.pub.tx.dropped.no_ack.bytes,
4598 pdev->stats.pub.tx.dropped.others.pkts,
4599 pdev->stats.pub.tx.dropped.others.bytes);
4600 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shahda008342016-05-17 18:50:40 +05304601 "Tx completions per HTT message:\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004602 "Single Packet %d\n"
4603 " 2-10 Packets %d\n"
4604 "11-20 Packets %d\n"
4605 "21-30 Packets %d\n"
4606 "31-40 Packets %d\n"
4607 "41-50 Packets %d\n"
4608 "51-60 Packets %d\n"
4609 " 60+ Packets %d\n",
4610 pdev->stats.pub.tx.comp_histogram.pkts_1,
4611 pdev->stats.pub.tx.comp_histogram.pkts_2_10,
4612 pdev->stats.pub.tx.comp_histogram.pkts_11_20,
4613 pdev->stats.pub.tx.comp_histogram.pkts_21_30,
4614 pdev->stats.pub.tx.comp_histogram.pkts_31_40,
4615 pdev->stats.pub.tx.comp_histogram.pkts_41_50,
4616 pdev->stats.pub.tx.comp_histogram.pkts_51_60,
4617 pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
Nirav Shahda008342016-05-17 18:50:40 +05304618
Mohit Khannaca4173b2017-09-12 21:52:19 -07004619 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304620 "RX PATH Statistics:");
Mohit Khannaca4173b2017-09-12 21:52:19 -07004621 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304622 "%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
Nirav Shahda008342016-05-17 18:50:40 +05304623 "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
4624 "msdus with frag_ind: %d msdus with offload_ind: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004625 pdev->stats.priv.rx.normal.ppdus,
4626 pdev->stats.priv.rx.normal.mpdus,
4627 pdev->stats.pub.rx.delivered.pkts,
4628 pdev->stats.pub.rx.delivered.bytes,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304629 pdev->stats.pub.rx.dropped_err.pkts,
4630 pdev->stats.pub.rx.dropped_err.bytes,
4631 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4632 pdev->stats.pub.rx.dropped_peer_invalid.bytes,
4633 pdev->stats.pub.rx.dropped_mic_err.pkts,
Nirav Shahda008342016-05-17 18:50:40 +05304634 pdev->stats.pub.rx.dropped_mic_err.bytes,
4635 pdev->stats.pub.rx.msdus_with_frag_ind,
4636 pdev->stats.pub.rx.msdus_with_offload_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004637
Mohit Khannaca4173b2017-09-12 21:52:19 -07004638 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004639 " fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
4640 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4641 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4642 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304643
Mohit Khannaca4173b2017-09-12 21:52:19 -07004644 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shahda008342016-05-17 18:50:40 +05304645 "Rx packets per HTT message:\n"
Nirav Shah6a4eee62016-04-25 10:15:04 +05304646 "Single Packet %d\n"
4647 " 2-10 Packets %d\n"
4648 "11-20 Packets %d\n"
4649 "21-30 Packets %d\n"
4650 "31-40 Packets %d\n"
4651 "41-50 Packets %d\n"
4652 "51-60 Packets %d\n"
4653 " 60+ Packets %d\n",
4654 pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4655 pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4656 pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4657 pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4658 pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4659 pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4660 pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4661 pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004662
4663 ol_txrx_disp_peer_stats(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004664}
4665
4666void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4667{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304668 qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004669}
4670
4671#if defined(ENABLE_TXRX_PROT_ANALYZE)
4672
4673void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4674{
4675 ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4676 ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4677}
4678
4679#endif /* ENABLE_TXRX_PROT_ANALYZE */
4680
4681#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
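/**
 * ol_txrx_peer_rssi() - get the last reported rx RSSI for a peer
 * @peer: peer object
 *
 * Return: RSSI in dBm, or OL_TXRX_RSSI_INVALID if none has been recorded
 */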
4682int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4683{
4684 return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4685 OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4686}
4687#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4688
4689#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
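/**
 * ol_txrx_peer_stats_copy() - copy a snapshot of a peer's stats
 * @pdev: txrx pdev handle
 * @peer: peer whose stats are copied
 * @stats: caller-provided buffer for the snapshot
 *
 * The copy is done under the pdev peer_stat_mutex so the snapshot is
 * internally consistent.
 *
 * Return: A_OK
 */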
4690A_STATUS
4691ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4692 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4693{
Anurag Chouhanc5548422016-02-24 18:33:27 +05304694 qdf_assert(pdev && peer && stats);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304695 qdf_spin_lock_bh(&pdev->peer_stat_mutex);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304696 qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304697 qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004698 return A_OK;
4699}
4700#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4701
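/**
 * ol_vdev_rx_set_intrabss_fwd() - enable/disable intra-BSS rx forwarding
 * @pvdev: virtual device handle
 * @val: true to disable intra-BSS forwarding on this vdev
 *
 * Return: none
 */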
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004702static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *pvdev, bool val)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004703{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004704 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004705
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004706 if (NULL == vdev)
4707 return;
4708
4709 vdev->disable_intrabss_fwd = val;
4710}
4711
Nirav Shahc657ef52016-07-26 14:22:38 +05304712/**
4713 * ol_txrx_update_mac_id() - update mac_id for vdev
4714 * @vdev_id: vdev id
4715 * @mac_id: mac id
4716 *
4717 * Return: none
4718 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004719static void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id)
Nirav Shahc657ef52016-07-26 14:22:38 +05304720{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004721 struct ol_txrx_vdev_t *vdev =
4722 (struct ol_txrx_vdev_t *)
4723 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Nirav Shahc657ef52016-07-26 14:22:38 +05304724
4725 if (NULL == vdev) {
4726 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4727 "%s: Invalid vdev_id %d", __func__, vdev_id);
4728 return;
4729 }
4730 vdev->mac_id = mac_id;
4731}
4732
Alok Kumar75355aa2018-03-19 17:32:58 +05304733/**
 4734 * ol_txrx_get_tx_ack_stats() - get tx ack count
4735 * @vdev_id: vdev_id
4736 *
4737 * Return: tx ack count
4738 */
4739static uint32_t ol_txrx_get_tx_ack_stats(uint8_t vdev_id)
4740{
4741 struct ol_txrx_vdev_t *vdev =
4742 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
4743 if (!vdev) {
4744 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4745 "%s: Invalid vdev_id %d", __func__, vdev_id);
4746 return 0;
4747 }
4748 return vdev->txrx_stats.txack_success;
4749}
4750
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004751#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
4752
4753/**
4754 * ol_txrx_get_vdev_from_sta_id() - get vdev from sta_id
4755 * @sta_id: sta_id
4756 *
4757 * Return: vdev handle
4758 * NULL if not found.
4759 */
4760static ol_txrx_vdev_handle ol_txrx_get_vdev_from_sta_id(uint8_t sta_id)
4761{
4762 struct ol_txrx_peer_t *peer = NULL;
4763 ol_txrx_pdev_handle pdev = NULL;
4764
4765 if (sta_id >= WLAN_MAX_STA_COUNT) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304766 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304767 "Invalid sta id passed");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004768 return NULL;
4769 }
4770
Anurag Chouhan6d760662016-02-20 16:05:43 +05304771 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004772 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304773 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304774 "PDEV not found for sta_id [%d]", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004775 return NULL;
4776 }
4777
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004778 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004779
4780 if (!peer) {
Zhu Jianminf7ffe942017-08-24 10:24:15 +08004781 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304782 "PEER [%d] not found", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004783 return NULL;
4784 }
4785
4786 return peer->vdev;
4787}
4788
4789/**
4790 * ol_txrx_register_tx_flow_control() - register tx flow control callback
4791 * @vdev_id: vdev_id
4792 * @flowControl: flow control callback
4793 * @osif_fc_ctx: callback context
bings284f8be2017-08-11 10:41:30 +08004794 * @flow_control_is_pause: is vdev paused by flow control
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004795 *
Jeff Johnson5ead5ab2018-05-06 00:11:08 -07004796 * Return: 0 for success or error code
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004797 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004798static int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
bings284f8be2017-08-11 10:41:30 +08004799 ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
4800 ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004801{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004802 struct ol_txrx_vdev_t *vdev =
4803 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004804
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004805 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304806 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304807 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004808 return -EINVAL;
4809 }
4810
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304811 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004812 vdev->osif_flow_control_cb = flowControl;
bings284f8be2017-08-11 10:41:30 +08004813 vdev->osif_flow_control_is_pause = flow_control_is_pause;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004814 vdev->osif_fc_ctx = osif_fc_ctx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304815 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004816 return 0;
4817}
4818
4819/**
Yun Parkeaea8632017-04-09 09:53:45 -07004820 * ol_txrx_deregister_tx_flow_control_cb() - deregister tx flow control
4821 * callback
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004822 * @vdev_id: vdev_id
4823 *
4824 * Return: 0 for success or error code
4825 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004826static int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004827{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004828 struct ol_txrx_vdev_t *vdev =
4829 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004830
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004831 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304832 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304833 "%s: Invalid vdev_id", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004834 return -EINVAL;
4835 }
4836
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304837 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004838 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08004839 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004840 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304841 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004842 return 0;
4843}
4844
4845/**
 4846 * ol_txrx_get_tx_resource() - check if tx resources are below the low watermark
4847 * @sta_id: sta id
4848 * @low_watermark: low watermark
4849 * @high_watermark_offset: high watermark offset value
4850 *
4851 * Return: true/false
4852 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004853static bool
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004854ol_txrx_get_tx_resource(uint8_t sta_id,
4855 unsigned int low_watermark,
4856 unsigned int high_watermark_offset)
4857{
4858 ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004859
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004860 if (NULL == vdev) {
Zhu Jianminf7ffe942017-08-24 10:24:15 +08004861 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304862 "%s: Invalid sta_id %d", __func__, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004863		/* Return true so the caller does not conclude that resources
 4864		 * are below the low_watermark.
4865 * sta_id validation will be done in ol_tx_send_data_frame
4866 * and if sta_id is not registered then host will drop
4867 * packet.
4868 */
4869 return true;
4870 }
4871
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304872 qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304873
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004874 if (vdev->pdev->tx_desc.num_free < (uint16_t) low_watermark) {
4875 vdev->tx_fl_lwm = (uint16_t) low_watermark;
4876 vdev->tx_fl_hwm =
4877 (uint16_t) (low_watermark + high_watermark_offset);
4878 /* Not enough free resource, stop TX OS Q */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05304879 qdf_atomic_set(&vdev->os_q_paused, 1);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304880 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004881 return false;
4882 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304883 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004884 return true;
4885}
4886
4887/**
4888 * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
4889 * @vdev_id: vdev id
4890 * @pause_q_depth: pause queue depth
4891 *
4892 * Return: 0 for success or error code
4893 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004894static int
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004895ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
4896{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004897 struct ol_txrx_vdev_t *vdev =
4898 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004899
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004900 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304901 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304902 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004903 return -EINVAL;
4904 }
4905
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304906 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004907 vdev->ll_pause.max_q_depth = pause_q_depth;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304908 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004909
4910 return 0;
4911}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004912#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
4913
Leo Chang8e073612015-11-13 10:55:34 -08004914/**
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004915 * ol_txrx_display_stats() - Display OL TXRX stats
 4916 * @soc: datapath soc handle
 * @value: Module id for which stats need to be displayed
 * @verb_level: verbosity level for the stats display
Nirav Shahda008342016-05-17 18:50:40 +05304917 *
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004918 * Return: status
Nirav Shahda008342016-05-17 18:50:40 +05304919 */
Mohit Khannaca4173b2017-09-12 21:52:19 -07004920static QDF_STATUS
4921ol_txrx_display_stats(void *soc, uint16_t value,
4922 enum qdf_stats_verbosity_level verb_level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004923{
4924 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004925 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004926
Anurag Chouhan6d760662016-02-20 16:05:43 +05304927 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004928 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304929 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304930 "%s: pdev is NULL", __func__);
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004931 return QDF_STATUS_E_NULL_VALUE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004932 }
4933
4934 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004935 case CDP_TXRX_PATH_STATS:
Mohit Khannaca4173b2017-09-12 21:52:19 -07004936 ol_txrx_stats_display(pdev, verb_level);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004937 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004938 case CDP_TXRX_TSO_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004939 ol_txrx_stats_display_tso(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004940 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004941 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07004942 ol_tx_dump_flow_pool_info((void *)pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004943 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004944 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304945 qdf_nbuf_tx_desc_count_display();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004946 break;
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -08004947 case CDP_WLAN_RX_BUF_DEBUG_STATS:
4948 htt_display_rx_buf_debug(pdev->htt_pdev);
4949 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304950#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004951 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304952 ol_tx_sched_cur_state_display(pdev);
4953 ol_tx_sched_stats_display(pdev);
4954 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004955 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304956 ol_tx_queue_log_display(pdev);
4957 break;
4958#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004959 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304960 ol_tx_dump_group_credit_stats(pdev);
4961 break;
4962#endif
4963
4964#ifdef DEBUG_HL_LOGGING
Nirav Shaheb017be2018-02-15 11:20:58 +05304965 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304966 htt_dump_bundle_stats(pdev->htt_pdev);
4967 break;
4968#endif
4969#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004970 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004971 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004972 break;
4973 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004974 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004975}
4976
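/*
 * Illustrative call path (a sketch, not part of the driver): this helper is
 * normally reached through the CDP ops table registered at the bottom of
 * this file (.display_stats in ol_ops_cmn) rather than called directly.
 * A hypothetical caller holding a struct cdp_soc_t pointer might do:
 *
 *	soc->ops->cmn_drv_ops->display_stats(soc, CDP_TXRX_PATH_STATS,
 *					     verb_level);
 *
 * where verb_level is whatever qdf_stats_verbosity_level the caller wants.
 */
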
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004977/**
4978 * ol_txrx_clear_stats() - Clear OL TXRX stats
            4979 * @value: Module id for which stats need to be cleared
4980 *
4981 * Return: None
4982 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004983static void ol_txrx_clear_stats(uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004984{
4985 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004986 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004987
Anurag Chouhan6d760662016-02-20 16:05:43 +05304988 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004989 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304990 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304991 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004992 return;
4993 }
4994
4995 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004996 case CDP_TXRX_PATH_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004997 ol_txrx_stats_clear(pdev);
4998 break;
Yun Park1027e8c2017-10-13 15:17:37 -07004999 case CDP_TXRX_TSO_STATS:
5000 ol_txrx_tso_stats_clear(pdev);
5001 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005002 case CDP_DUMP_TX_FLOW_POOL_INFO:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005003 ol_tx_clear_flow_pool_stats();
5004 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005005 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05305006 qdf_nbuf_tx_desc_count_clear();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005007 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305008#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005009 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305010 ol_tx_sched_stats_clear(pdev);
5011 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005012 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305013 ol_tx_queue_log_clear(pdev);
5014 break;
5015#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005016 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305017 ol_tx_clear_group_credit_stats(pdev);
5018 break;
5019#endif
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005020 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305021 htt_clear_bundle_stats(pdev->htt_pdev);
5022 break;
5023#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005024 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07005025 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005026 break;
5027 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07005028
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005029}
5030
5031/**
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005032 * ol_txrx_drop_nbuf_list() - drop an nbuf list
5033 * @buf_list: buffer list to be dropepd
5034 *
5035 * Return: int (number of bufs dropped)
5036 */
5037static inline int ol_txrx_drop_nbuf_list(qdf_nbuf_t buf_list)
5038{
5039 int num_dropped = 0;
5040 qdf_nbuf_t buf, next_buf;
5041 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5042
5043 buf = buf_list;
5044 while (buf) {
Himanshu Agarwaldd2196a2017-07-31 11:38:14 +05305045 QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005046 next_buf = qdf_nbuf_queue_next(buf);
5047 if (pdev)
5048 TXRX_STATS_MSDU_INCR(pdev,
5049 rx.dropped_peer_invalid, buf);
5050 qdf_nbuf_free(buf);
5051 buf = next_buf;
5052 num_dropped++;
5053 }
5054 return num_dropped;
5055}
5056
5057/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005058 * ol_rx_data_cb() - data rx callback
            5059 * @pdev: pdev handle
5060 * @buf_list: buffer list
Nirav Shah36a87bf2016-02-22 12:38:46 +05305061 * @staid: Station id
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005062 *
5063 * Return: None
5064 */
Nirav Shah36a87bf2016-02-22 12:38:46 +05305065static void ol_rx_data_cb(struct ol_txrx_pdev_t *pdev,
5066 qdf_nbuf_t buf_list, uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005067{
Mohit Khanna0696eef2016-04-14 16:14:08 -07005068 void *osif_dev;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005069 uint8_t drop_count = 0;
Nirav Shahcbc6d722016-03-01 16:24:53 +05305070 qdf_nbuf_t buf, next_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305071 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08005072 ol_txrx_rx_fp data_rx = NULL;
Nirav Shah36a87bf2016-02-22 12:38:46 +05305073 struct ol_txrx_peer_t *peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005074
Jeff Johnsondac9e382017-09-24 10:36:08 -07005075 if (qdf_unlikely(!pdev))
Nirav Shah36a87bf2016-02-22 12:38:46 +05305076 goto free_buf;
5077
5078 /* Do not use peer directly. Derive peer from staid to
5079 * make sure that peer is valid.
5080 */
Jingxiang Ge3badb982018-01-02 17:39:01 +08005081 peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev,
5082 staid, PEER_DEBUG_ID_OL_RX_THREAD);
Nirav Shah36a87bf2016-02-22 12:38:46 +05305083 if (!peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005084 goto free_buf;
5085
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305086 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre50141c52016-04-07 13:15:29 -07005087 if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
5088 !peer->vdev->rx)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305089 qdf_spin_unlock_bh(&peer->peer_info_lock);
Jingxiang Ge9f297062018-01-24 13:31:31 +08005090 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005091 goto free_buf;
5092 }
Dhanashri Atre182b0272016-02-17 15:35:07 -08005093
5094 data_rx = peer->vdev->rx;
Mohit Khanna0696eef2016-04-14 16:14:08 -07005095 osif_dev = peer->vdev->osif_dev;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305096 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005097
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005098 qdf_spin_lock_bh(&peer->bufq_info.bufq_lock);
5099 if (!list_empty(&peer->bufq_info.cached_bufq)) {
5100 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005101 /* Flush the cached frames to HDD before passing new rx frame */
5102 ol_txrx_flush_rx_frames(peer, 0);
5103 } else
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005104 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005105
Jingxiang Ge3badb982018-01-02 17:39:01 +08005106 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
5107
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005108 buf = buf_list;
5109 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05305110 next_buf = qdf_nbuf_queue_next(buf);
5111 qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
Mohit Khanna0696eef2016-04-14 16:14:08 -07005112 ret = data_rx(osif_dev, buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305113 if (ret != QDF_STATUS_SUCCESS) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305114 ol_txrx_err("Frame Rx to HDD failed");
Nirav Shah6a4eee62016-04-25 10:15:04 +05305115 if (pdev)
5116 TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05305117 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005118 }
5119 buf = next_buf;
5120 }
5121 return;
5122
5123free_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005124 drop_count = ol_txrx_drop_nbuf_list(buf_list);
5125 ol_txrx_warn("%s:Dropped frames %u", __func__, drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005126}
5127
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005128/* print for every 16th packet */
5129#define OL_TXRX_PRINT_RATE_LIMIT_THRESH 0x0f
5130struct ol_rx_cached_buf *cache_buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305131
5132/** helper function to drop packets
            5133 * Note: caller must hold the cached bufq lock before invoking
5134 * this function. Also, it assumes that the pointers passed in
5135 * are valid (non-NULL)
5136 */
5137static inline void ol_txrx_drop_frames(
5138 struct ol_txrx_cached_bufq_t *bufqi,
5139 qdf_nbuf_t rx_buf_list)
5140{
5141 uint32_t dropped = ol_txrx_drop_nbuf_list(rx_buf_list);
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07005142
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305143 bufqi->dropped += dropped;
5144 bufqi->qdepth_no_thresh += dropped;
5145
5146 if (bufqi->qdepth_no_thresh > bufqi->high_water_mark)
5147 bufqi->high_water_mark = bufqi->qdepth_no_thresh;
5148}
5149
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005150static QDF_STATUS ol_txrx_enqueue_rx_frames(
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305151 struct ol_txrx_peer_t *peer,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005152 struct ol_txrx_cached_bufq_t *bufqi,
5153 qdf_nbuf_t rx_buf_list)
5154{
5155 struct ol_rx_cached_buf *cache_buf;
5156 qdf_nbuf_t buf, next_buf;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005157 static uint32_t count;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005158
5159 if ((count++ & OL_TXRX_PRINT_RATE_LIMIT_THRESH) == 0)
5160 ol_txrx_info_high(
5161 "Data on the peer before it is registered bufq->curr %d bufq->drops %d",
5162 bufqi->curr, bufqi->dropped);
5163
5164 qdf_spin_lock_bh(&bufqi->bufq_lock);
5165 if (bufqi->curr >= bufqi->thresh) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305166 ol_txrx_drop_frames(bufqi, rx_buf_list);
5167 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5168 return QDF_STATUS_E_FAULT;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005169 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005170 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5171
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005172 buf = rx_buf_list;
5173 while (buf) {
5174 next_buf = qdf_nbuf_queue_next(buf);
5175 cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
5176 if (!cache_buf) {
5177 ol_txrx_err(
5178 "Failed to allocate buf to cache the rx frames");
5179 qdf_nbuf_free(buf);
5180 } else {
5181 /* Add NULL terminator */
5182 qdf_nbuf_set_next(buf, NULL);
5183 cache_buf->buf = buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305184 if (peer && peer->valid) {
5185 qdf_spin_lock_bh(&bufqi->bufq_lock);
5186 list_add_tail(&cache_buf->list,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005187 &bufqi->cached_bufq);
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305188 bufqi->curr++;
5189 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5190 } else {
5191 qdf_mem_free(cache_buf);
5192 rx_buf_list = buf;
5193 qdf_nbuf_set_next(rx_buf_list, next_buf);
5194 qdf_spin_lock_bh(&bufqi->bufq_lock);
5195 ol_txrx_drop_frames(bufqi, rx_buf_list);
5196 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5197 return QDF_STATUS_E_FAULT;
5198 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005199 }
5200 buf = next_buf;
5201 }
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305202 return QDF_STATUS_SUCCESS;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005203}
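
/*
 * Note on the caching path (descriptive only): frames queued here by
 * ol_rx_data_process() remain on peer->bufq_info.cached_bufq until
 * ol_txrx_register_peer() registers the station and flushes them via
 * ol_txrx_flush_rx_frames(); once the bufq threshold is reached, newly
 * arriving frames are instead discarded by ol_txrx_drop_frames().
 */
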
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005204/**
5205 * ol_rx_data_process() - process rx frame
5206 * @peer: peer
5207 * @rx_buf_list: rx buffer list
5208 *
5209 * Return: None
5210 */
5211void ol_rx_data_process(struct ol_txrx_peer_t *peer,
Nirav Shahcbc6d722016-03-01 16:24:53 +05305212 qdf_nbuf_t rx_buf_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005213{
Yun Parkeaea8632017-04-09 09:53:45 -07005214 /*
            5215	 * Firmware data path active response is handled on the shim RX thread:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005216	 * the T2H message arrives in SIRQ context, and IPA kernel module APIs
Yun Parkeaea8632017-04-09 09:53:45 -07005217	 * must not be called from SIRQ context.
5218 */
Dhanashri Atre182b0272016-02-17 15:35:07 -08005219 ol_txrx_rx_fp data_rx = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05305220 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005221 uint8_t drop_count;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005222
5223 if ((!peer) || (!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305224 ol_txrx_err("peer/pdev is NULL");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005225 goto drop_rx_buf;
5226 }
5227
Dhanashri Atre182b0272016-02-17 15:35:07 -08005228 qdf_assert(peer->vdev);
5229
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305230 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005231 if (peer->state >= OL_TXRX_PEER_STATE_CONN)
Dhanashri Atre182b0272016-02-17 15:35:07 -08005232 data_rx = peer->vdev->rx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305233 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005234
5235 /*
5236 * If there is a data frame from peer before the peer is
5237 * registered for data service, enqueue them on to pending queue
5238 * which will be flushed to HDD once that station is registered.
5239 */
5240 if (!data_rx) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305241 if (ol_txrx_enqueue_rx_frames(peer, &peer->bufq_info,
5242 rx_buf_list)
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005243 != QDF_STATUS_SUCCESS)
Poddar, Siddarth07eebf32017-04-19 12:40:26 +05305244 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5245 "%s: failed to enqueue rx frm to cached_bufq",
5246 __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005247 } else {
5248#ifdef QCA_CONFIG_SMP
5249 /*
5250 * If the kernel is SMP, schedule rx thread to
5251 * better use multicores.
5252 */
5253 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
Nirav Shah36a87bf2016-02-22 12:38:46 +05305254 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005255 } else {
5256 p_cds_sched_context sched_ctx =
5257 get_cds_sched_ctxt();
5258 struct cds_ol_rx_pkt *pkt;
5259
5260 if (unlikely(!sched_ctx))
5261 goto drop_rx_buf;
5262
5263 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
5264 if (!pkt) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05305265 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305266 "No available Rx message buffer");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005267 goto drop_rx_buf;
5268 }
5269 pkt->callback = (cds_ol_rx_thread_cb)
5270 ol_rx_data_cb;
Nirav Shah36a87bf2016-02-22 12:38:46 +05305271 pkt->context = (void *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005272 pkt->Rxpkt = (void *)rx_buf_list;
5273 pkt->staId = peer->local_id;
5274 cds_indicate_rxpkt(sched_ctx, pkt);
5275 }
5276#else /* QCA_CONFIG_SMP */
Nirav Shah36a87bf2016-02-22 12:38:46 +05305277 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005278#endif /* QCA_CONFIG_SMP */
5279 }
5280
5281 return;
5282
5283drop_rx_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005284 drop_count = ol_txrx_drop_nbuf_list(rx_buf_list);
5285 ol_txrx_info_high("Dropped rx packets %u", drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005286}
5287
5288/**
5289 * ol_txrx_register_peer() - register peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005290 * @sta_desc: sta descriptor
5291 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05305292 * Return: QDF Status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005293 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005294static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005295{
5296 struct ol_txrx_peer_t *peer;
Anurag Chouhan6d760662016-02-20 16:05:43 +05305297 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005298 union ol_txrx_peer_update_param_t param;
5299 struct privacy_exemption privacy_filter;
5300
5301 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305302 ol_txrx_err("Pdev is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305303 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005304 }
5305
5306 if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305307 ol_txrx_err("Invalid sta id :%d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005308 sta_desc->sta_id);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305309 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005310 }
5311
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005312 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
5313 sta_desc->sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005314 if (!peer)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305315 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005316
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305317 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005318 peer->state = OL_TXRX_PEER_STATE_CONN;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305319 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005320
5321 param.qos_capable = sta_desc->is_qos_enabled;
5322 ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
5323 ol_txrx_peer_update_qos_capable);
5324
5325 if (sta_desc->is_wapi_supported) {
5326 /*Privacy filter to accept unencrypted WAI frames */
5327 privacy_filter.ether_type = ETHERTYPE_WAI;
5328 privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
5329 privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
5330 ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
5331 }
5332
5333 ol_txrx_flush_rx_frames(peer, 0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305334 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005335}
5336
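/*
 * Illustrative caller view (a sketch, not a definitive contract): this is
 * reached through the CDP peer ops entry registered below (.register_peer).
 * The caller fills in a struct ol_txrx_desc_type with at least sta_id,
 * is_qos_enabled and is_wapi_supported; the peer object itself must already
 * exist, since it is looked up here with ol_txrx_peer_find_by_local_id().
 */
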
5337/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005338 * ol_txrx_register_ocb_peer - Function to register the OCB peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005339 * @mac_addr: MAC address of the self peer
5340 * @peer_id: Pointer to the peer ID
5341 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305342 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005343 */
Jeff Johnson382bce02017-09-01 14:21:07 -07005344static QDF_STATUS ol_txrx_register_ocb_peer(uint8_t *mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005345 uint8_t *peer_id)
5346{
5347 ol_txrx_pdev_handle pdev;
5348 ol_txrx_peer_handle peer;
5349
Anurag Chouhan6d760662016-02-20 16:05:43 +05305350 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005351 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305352 ol_txrx_err("%s: Unable to find pdev!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005353 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305354 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005355 }
5356
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005357 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
5358 mac_addr, peer_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005359 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305360 ol_txrx_err("%s: Unable to find OCB peer!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005361 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305362 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005363 }
5364
5365 ol_txrx_set_ocb_peer(pdev, peer);
5366
5367 /* Set peer state to connected */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005368 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005369 OL_TXRX_PEER_STATE_AUTH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005370
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305371 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005372}
5373
5374/**
5375 * ol_txrx_set_ocb_peer - Function to store the OCB peer
5376 * @pdev: Handle to the HTT instance
5377 * @peer: Pointer to the peer
5378 */
5379void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
5380 struct ol_txrx_peer_t *peer)
5381{
5382 if (pdev == NULL)
5383 return;
5384
5385 pdev->ocb_peer = peer;
5386 pdev->ocb_peer_valid = (NULL != peer);
5387}
5388
5389/**
5390 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
5391 * @pdev: Handle to the HTT instance
5392 * @peer: Pointer to the returned peer
5393 *
5394 * Return: true if the peer is valid, false if not
5395 */
5396bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
5397 struct ol_txrx_peer_t **peer)
5398{
5399 int rc;
5400
5401 if ((pdev == NULL) || (peer == NULL)) {
5402 rc = false;
5403 goto exit;
5404 }
5405
5406 if (pdev->ocb_peer_valid) {
5407 *peer = pdev->ocb_peer;
5408 rc = true;
5409 } else {
5410 rc = false;
5411 }
5412
5413exit:
5414 return rc;
5415}
5416
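/*
 * Illustrative usage (a sketch under assumptions, not part of this file):
 * the control path registers the OCB self peer once and the data path can
 * retrieve it later, e.g.
 *
 *	uint8_t peer_id;
 *	struct ol_txrx_peer_t *ocb_peer = NULL;
 *
 *	if (ol_txrx_register_ocb_peer(self_mac, &peer_id) ==
 *	    QDF_STATUS_SUCCESS &&
 *	    ol_txrx_get_ocb_peer(pdev, &ocb_peer))
 *		use_ocb_peer(ocb_peer);
 *
 * self_mac, pdev and use_ocb_peer() are caller-provided and hypothetical;
 * only the two ol_txrx_* calls are defined in this file.
 */
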
5417#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5418/**
5419 * ol_txrx_register_pause_cb() - register pause callback
5420 * @pause_cb: pause callback
5421 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05305422 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005423 */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07005424static QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
5425 tx_pause_callback pause_cb)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005426{
Anurag Chouhan6d760662016-02-20 16:05:43 +05305427 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Yun Parkeaea8632017-04-09 09:53:45 -07005428
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005429 if (!pdev || !pause_cb) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305430 ol_txrx_err("pdev or pause_cb is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305431 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005432 }
5433 pdev->pause_cb = pause_cb;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305434 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005435}
5436#endif
5437
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005438#ifdef RECEIVE_OFFLOAD
5439/**
5440 * ol_txrx_offld_flush_handler() - offld flush handler
5441 * @context: dev handle
5442 * @rxpkt: rx data
5443 * @staid: station id
5444 *
5445 * This function handles an offld flush indication.
5446 * If the rx thread is enabled, it will be invoked by the rx
5447 * thread else it will be called in the tasklet context
5448 *
5449 * Return: none
5450 */
5451static void ol_txrx_offld_flush_handler(void *context,
5452 void *rxpkt,
5453 uint16_t staid)
5454{
5455 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5456
5457 if (qdf_unlikely(!pdev)) {
5458 ol_txrx_err("Invalid context");
5459 qdf_assert(0);
5460 return;
5461 }
5462
5463 if (pdev->offld_flush_cb)
5464 pdev->offld_flush_cb(context);
5465 else
5466 ol_txrx_err("offld_flush_cb NULL");
5467}
5468
5469/**
5470 * ol_txrx_offld_flush() - offld flush callback
5471 * @data: opaque data pointer
5472 *
5473 * This is the callback registered with CE to trigger
5474 * an offld flush
5475 *
5476 * Return: none
5477 */
5478static void ol_txrx_offld_flush(void *data)
5479{
5480 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
5481 struct cds_ol_rx_pkt *pkt;
5482 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5483
5484 if (qdf_unlikely(!sched_ctx))
5485 return;
            5486
	/* the TXRX context may not be available; bail out before use */
	if (qdf_unlikely(!pdev))
		return;

            5487	if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
5488 ol_txrx_offld_flush_handler(data, NULL, 0);
5489 } else {
5490 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
5491 if (qdf_unlikely(!pkt)) {
5492 ol_txrx_err("Not able to allocate context");
5493 return;
5494 }
5495
5496 pkt->callback = ol_txrx_offld_flush_handler;
5497 pkt->context = data;
5498 pkt->Rxpkt = NULL;
5499 pkt->staId = 0;
5500 cds_indicate_rxpkt(sched_ctx, pkt);
5501 }
5502}
5503
5504/**
5505 * ol_register_offld_flush_cb() - register the offld flush callback
5506 * @offld_flush_cb: flush callback function
5508 *
5509 * Store the offld flush callback provided and in turn
5510 * register OL's offld flush handler with CE
5511 *
5512 * Return: none
5513 */
5514static void ol_register_offld_flush_cb(void (offld_flush_cb)(void *))
5515{
5516 struct hif_opaque_softc *hif_device;
5517 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5518
5519 if (pdev == NULL) {
5520 ol_txrx_err("pdev NULL!");
5521 TXRX_ASSERT2(0);
5522 goto out;
5523 }
5524 if (pdev->offld_flush_cb != NULL) {
5525 ol_txrx_info("offld already initialised");
5526 if (pdev->offld_flush_cb != offld_flush_cb) {
5527 ol_txrx_err(
            5528				"offld_flush_cb differs from previously registered callback");
5529 TXRX_ASSERT2(0);
5530 goto out;
5531 }
5532 goto out;
5533 }
5534 pdev->offld_flush_cb = offld_flush_cb;
5535 hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5536
5537 if (qdf_unlikely(hif_device == NULL)) {
5538 ol_txrx_err("hif_device NULL!");
5539 qdf_assert(0);
5540 goto out;
5541 }
5542
5543 hif_offld_flush_cb_register(hif_device, ol_txrx_offld_flush);
5544
5545out:
5546 return;
5547}
5548
5549/**
5550 * ol_deregister_offld_flush_cb() - deregister the offld flush callback
5551 *
5552 * Remove the offld flush callback provided and in turn
5553 * deregister OL's offld flush handler with CE
5554 *
5555 * Return: none
5556 */
5557static void ol_deregister_offld_flush_cb(void)
5558{
5559 struct hif_opaque_softc *hif_device;
5560 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5561
5562 if (pdev == NULL) {
5563 ol_txrx_err("pdev NULL!");
5564 return;
5565 }
5566 hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5567
5568 if (qdf_unlikely(hif_device == NULL)) {
5569 ol_txrx_err("hif_device NULL!");
5570 qdf_assert(0);
5571 return;
5572 }
5573
5574 hif_offld_flush_cb_deregister(hif_device);
5575
5576 pdev->offld_flush_cb = NULL;
5577}
5578#endif /* RECEIVE_OFFLOAD */
5579
Poddar, Siddarth34872782017-08-10 14:08:51 +05305580/**
5581 * ol_register_data_stall_detect_cb() - register data stall callback
5582 * @data_stall_detect_callback: data stall callback function
5583 *
5584 *
5585 * Return: QDF_STATUS Enumeration
5586 */
5587static QDF_STATUS ol_register_data_stall_detect_cb(
5588 data_stall_detect_cb data_stall_detect_callback)
5589{
5590 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5591
5592 if (pdev == NULL) {
5593 ol_txrx_err("%s: pdev NULL!", __func__);
5594 return QDF_STATUS_E_INVAL;
5595 }
5596 pdev->data_stall_detect_callback = data_stall_detect_callback;
5597 return QDF_STATUS_SUCCESS;
5598}
5599
5600/**
5601 * ol_deregister_data_stall_detect_cb() - de-register data stall callback
5602 * @data_stall_detect_callback: data stall callback function
5603 *
5604 *
5605 * Return: QDF_STATUS Enumeration
5606 */
5607static QDF_STATUS ol_deregister_data_stall_detect_cb(
5608 data_stall_detect_cb data_stall_detect_callback)
5609{
5610 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5611
5612 if (pdev == NULL) {
5613 ol_txrx_err("%s: pdev NULL!", __func__);
5614 return QDF_STATUS_E_INVAL;
5615 }
5616 pdev->data_stall_detect_callback = NULL;
5617 return QDF_STATUS_SUCCESS;
5618}
5619
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305620/**
5621 * ol_txrx_post_data_stall_event() - post data stall event
5622 * @indicator: Module triggering data stall
5623 * @data_stall_type: data stall event type
5624 * @pdev_id: pdev id
5625 * @vdev_id_bitmap: vdev id bitmap
5626 * @recovery_type: data stall recovery type
5627 *
5628 * Return: None
5629 */
5630static void ol_txrx_post_data_stall_event(
5631 enum data_stall_log_event_indicator indicator,
5632 enum data_stall_log_event_type data_stall_type,
5633 uint32_t pdev_id, uint32_t vdev_id_bitmap,
5634 enum data_stall_log_recovery_type recovery_type)
5635{
5636 struct scheduler_msg msg = {0};
5637 QDF_STATUS status;
5638 struct data_stall_event_info *data_stall_info;
5639 ol_txrx_pdev_handle pdev;
5640
5641 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5642 if (!pdev) {
5643 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5644 "%s: pdev is NULL.", __func__);
5645 return;
5646 }
5647 data_stall_info = qdf_mem_malloc(sizeof(*data_stall_info));
5648 if (!data_stall_info) {
5649 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5650 "%s: data_stall_info is NULL.", __func__);
5651 return;
5652 }
5653 data_stall_info->indicator = indicator;
5654 data_stall_info->data_stall_type = data_stall_type;
5655 data_stall_info->vdev_id_bitmap = vdev_id_bitmap;
5656 data_stall_info->pdev_id = pdev_id;
5657 data_stall_info->recovery_type = recovery_type;
5658
Poddar, Siddarthb9047592017-10-05 15:48:28 +05305659 if (data_stall_info->data_stall_type ==
5660 DATA_STALL_LOG_FW_RX_REFILL_FAILED)
5661 htt_log_rx_ring_info(pdev->htt_pdev);
5662
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305663 sys_build_message_header(SYS_MSG_ID_DATA_STALL_MSG, &msg);
5664 /* Save callback and data */
5665 msg.callback = pdev->data_stall_detect_callback;
5666 msg.bodyptr = data_stall_info;
5667 msg.bodyval = 0;
5668
5669 status = scheduler_post_msg(QDF_MODULE_ID_SYS, &msg);
5670
5671 if (status != QDF_STATUS_SUCCESS) {
5672 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5673 "%s: failed to post data stall msg to SYS", __func__);
5674 qdf_mem_free(data_stall_info);
5675 }
5676}
5677
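/*
 * Illustrative flow (descriptive only): a stall-detection site reaches this
 * through the CDP misc ops entry registered below
 * (.txrx_post_data_stall_event), passing an event type such as
 * DATA_STALL_LOG_FW_RX_REFILL_FAILED plus the affected pdev id and vdev id
 * bitmap; the event is forwarded on the SYS scheduler thread to the callback
 * installed earlier via ol_register_data_stall_detect_cb().
 */
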
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305678void
5679ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
5680{
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07005681 qdf_print("%s: Pkt: VA 0x%pK PA 0x%llx len %d\n", __func__,
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305682 qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
5683 print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
5684 qdf_nbuf_data(nbuf), len, true);
5685}
5686
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305687#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5688bool
5689ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
5690{
Yun Park63661012018-01-04 15:04:22 -08005691 struct ol_tx_flow_pool_t *pool;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305692 bool enough_desc_flag;
5693
5694 if (!vdev)
Yun Parkff5da562017-01-18 14:44:20 -08005695 return false;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305696
5697 pool = vdev->pool;
5698
Yun Parkff5da562017-01-18 14:44:20 -08005699 if (!pool)
5700 return false;
5701
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305702 qdf_spin_lock_bh(&pool->flow_pool_lock);
5703 enough_desc_flag = (pool->avail_desc < (pool->stop_th +
Yun Parkff5da562017-01-18 14:44:20 -08005704 OL_TX_NON_FWD_RESERVE))
5705 ? false : true;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305706 qdf_spin_unlock_bh(&pool->flow_pool_lock);
5707 return enough_desc_flag;
5708}
5709#else
5710bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
5711{
5712 return true;
5713}
5714#endif
5715
Dhanashri Atre12a08392016-02-17 13:10:34 -08005716/**
5717 * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
5718 * @vdev_id: vdev_id
5719 *
5720 * Return: vdev handle
5721 * NULL if not found.
5722 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005723struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
Dhanashri Atre12a08392016-02-17 13:10:34 -08005724{
5725 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5726 ol_txrx_vdev_handle vdev = NULL;
5727
5728 if (qdf_unlikely(!pdev))
5729 return NULL;
5730
5731 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5732 if (vdev->vdev_id == vdev_id)
5733 break;
5734 }
5735
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005736 return (struct cdp_vdev *)vdev;
Dhanashri Atre12a08392016-02-17 13:10:34 -08005737}
Nirav Shah2e583a02016-04-30 14:06:12 +05305738
5739/**
5740 * ol_txrx_set_wisa_mode() - set wisa mode
5741 * @vdev: vdev handle
5742 * @enable: enable flag
5743 *
5744 * Return: QDF STATUS
5745 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005746static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_vdev *pvdev, bool enable)
Nirav Shah2e583a02016-04-30 14:06:12 +05305747{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005748 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Leo Chang98726762016-10-28 11:07:18 -07005749
Nirav Shah2e583a02016-04-30 14:06:12 +05305750 if (!vdev)
5751 return QDF_STATUS_E_INVAL;
5752
5753 vdev->is_wisa_mode_enable = enable;
5754 return QDF_STATUS_SUCCESS;
5755}
Leo Chang98726762016-10-28 11:07:18 -07005756
5757/**
5758 * ol_txrx_get_vdev_id() - get interface id from interface context
5759 * @pvdev: vdev handle
5760 *
5761 * Return: virtual interface id
5762 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005763static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
Leo Chang98726762016-10-28 11:07:18 -07005764{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005765 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07005766
Leo Chang98726762016-10-28 11:07:18 -07005767 return vdev->vdev_id;
5768}
5769
5770/**
5771 * ol_txrx_last_assoc_received() - get time of last assoc received
5772 * @ppeer: peer handle
5773 *
            5774 * Return: pointer to the time of last assoc received
5775 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005776static qdf_time_t *ol_txrx_last_assoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005777{
5778 ol_txrx_peer_handle peer = ppeer;
5779
5780 return &peer->last_assoc_rcvd;
5781}
5782
5783/**
5784 * ol_txrx_last_disassoc_received() - get time of last disassoc received
5785 * @ppeer: peer handle
5786 *
            5787 * Return: pointer to the time of last disassoc received
5788 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005789static qdf_time_t *ol_txrx_last_disassoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005790{
5791 ol_txrx_peer_handle peer = ppeer;
5792
5793 return &peer->last_disassoc_rcvd;
5794}
5795
5796/**
5797 * ol_txrx_last_deauth_received() - get time of last deauth received
5798 * @ppeer: peer handle
5799 *
            5800 * Return: pointer to the time of last deauth received
5801 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005802static qdf_time_t *ol_txrx_last_deauth_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005803{
5804 ol_txrx_peer_handle peer = ppeer;
5805
5806 return &peer->last_deauth_rcvd;
5807}
5808
5809/**
5810 * ol_txrx_soc_attach_target() - attach soc target
5811 * @soc: soc handle
5812 *
            5813 * MCL legacy OL does nothing here
5814 *
5815 * Return: 0
5816 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005817static int ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
Leo Chang98726762016-10-28 11:07:18 -07005818{
            5819	/* MCL legacy OL does nothing here */
5820 return 0;
5821}
5822
5823/**
5824 * ol_txrx_soc_detach() - detach soc target
5825 * @soc: soc handle
5826 *
            5827 * MCL legacy OL frees the soc handle allocated in ol_txrx_soc_attach()
            5828 *
            5829 * Return: none
5830 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005831static void ol_txrx_soc_detach(void *soc)
Leo Chang98726762016-10-28 11:07:18 -07005832{
Venkata Sharath Chandra Manchala0c2eece2017-03-09 17:30:52 -08005833 qdf_mem_free(soc);
Leo Chang98726762016-10-28 11:07:18 -07005834}
5835
5836/**
5837 * ol_txrx_pkt_log_con_service() - connect packet log service
5838 * @ppdev: physical device handle
5839 * @scn: device context
5840 *
            5841 * Return: none
5842 */
Nirav Shahbb8e47c2018-05-17 16:56:41 +05305843#ifdef REMOVE_PKT_LOG
5844static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
5845{
5846}
5847#else
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005848static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
Leo Chang98726762016-10-28 11:07:18 -07005849{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005850 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07005851
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005852 htt_pkt_log_init((struct cdp_pdev *)pdev, scn);
Leo Chang98726762016-10-28 11:07:18 -07005853 pktlog_htc_attach();
5854}
Nirav Shahbb8e47c2018-05-17 16:56:41 +05305855#endif
Leo Chang98726762016-10-28 11:07:18 -07005856
5857/* OL wrapper functions for CDP abstraction */
5858/**
5859 * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
5860 * @peer: peer handle
            5861 * @drop: whether to drop or deliver the cached rx packets
5862 *
5863 * Return: none
5864 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005865static void ol_txrx_wrapper_flush_rx_frames(void *peer, bool drop)
Leo Chang98726762016-10-28 11:07:18 -07005866{
5867 ol_txrx_flush_rx_frames((ol_txrx_peer_handle)peer, drop);
5868}
5869
5870/**
5871 * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
5872 * @ppdev: pdev handle
5873 * @vdev_id: interface id
5874 *
5875 * Return: virtual interface instance
5876 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005877static
5878struct cdp_vdev *ol_txrx_wrapper_get_vdev_from_vdev_id(struct cdp_pdev *ppdev,
5879 uint8_t vdev_id)
Leo Chang98726762016-10-28 11:07:18 -07005880{
5881 return ol_txrx_get_vdev_from_vdev_id(vdev_id);
5882}
5883
5884/**
5885 * ol_txrx_wrapper_register_peer() - register peer
5886 * @pdev: pdev handle
5887 * @sta_desc: peer description
5888 *
5889 * Return: QDF STATUS
5890 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005891static QDF_STATUS ol_txrx_wrapper_register_peer(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005892 struct ol_txrx_desc_type *sta_desc)
5893{
5894 return ol_txrx_register_peer(sta_desc);
5895}
5896
5897/**
5898 * ol_txrx_wrapper_peer_find_by_local_id() - Find a txrx peer handle
5899 * @pdev - the data physical device object
5900 * @local_peer_id - the ID txrx assigned locally to the peer in question
5901 *
5902 * The control SW typically uses the txrx peer handle to refer to the peer.
5903 * In unusual circumstances, if it is infeasible for the control SW maintain
            5904 * In unusual circumstances, if it is infeasible for the control SW to maintain
            5905 * the txrx peer handle but it can maintain a small integer local peer ID,
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005906 * this function allows the peer handle to be retrieved, based on the local
5907 *
5908 * @return handle to the txrx peer object
5909 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005910static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005911ol_txrx_wrapper_peer_find_by_local_id(struct cdp_pdev *pdev,
5912 uint8_t local_peer_id)
Leo Chang98726762016-10-28 11:07:18 -07005913{
5914 return (void *)ol_txrx_peer_find_by_local_id(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005915 pdev, local_peer_id);
Leo Chang98726762016-10-28 11:07:18 -07005916}
5917
5918/**
            5919 * ol_txrx_wrapper_cfg_is_high_latency() - check if the device uses a high latency bus
5920 * @pdev: pdev handle
5921 *
5922 * Return: 1 high latency bus
5923 * 0 low latency bus
5924 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005925static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
Leo Chang98726762016-10-28 11:07:18 -07005926{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005927 return ol_cfg_is_high_latency(cfg_pdev);
Leo Chang98726762016-10-28 11:07:18 -07005928}
5929
5930/**
5931 * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
            5932 * @pdev - pdev handle
 * @peer_mac - mac address of the peer whose state has changed
            5933 * @state - the new state of the peer
5934 *
5935 * Specify the peer's authentication state (none, connected, authenticated)
5936 * to allow the data SW to determine whether to filter out invalid data frames.
5937 * (In the "connected" state, where security is enabled, but authentication
5938 * has not completed, tx and rx data frames other than EAPOL or WAPI should
5939 * be discarded.)
5940 * This function is only relevant for systems in which the tx and rx filtering
5941 * are done in the host rather than in the target.
5942 *
5943 * Return: QDF Status
5944 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005945static QDF_STATUS ol_txrx_wrapper_peer_state_update(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005946 uint8_t *peer_mac, enum ol_txrx_peer_state state)
5947{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005948 return ol_txrx_peer_state_update(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005949 peer_mac, state);
5950}
5951
5952/**
5953 * ol_txrx_wrapper_find_peer_by_addr() - find peer instance by address
5954 * @pdev: pdev handle
Jeff Johnson37df7c32018-05-10 12:30:35 -07005955 * @peer_addr: peer address we want to find
Leo Chang98726762016-10-28 11:07:18 -07005956 * @peer_id: peer id
5957 *
5958 * Return: peer instance pointer
5959 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005960static void *ol_txrx_wrapper_find_peer_by_addr(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005961 uint8_t *peer_addr, uint8_t *peer_id)
5962{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005963 return ol_txrx_find_peer_by_addr(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005964 peer_addr, peer_id);
5965}
5966
5967/**
Mohit Khannab7bec722017-11-10 11:43:44 -08005968 * ol_txrx_wrapper_peer_get_ref_by_addr() - get peer reference by address
5969 * @pdev: pdev handle
5970 * @peer_addr: peer address we want to find
5971 * @peer_id: peer id
5972 * @debug_id: peer debug id for tracking
5973 *
5974 * Return: peer instance pointer
5975 */
5976static void *
5977ol_txrx_wrapper_peer_get_ref_by_addr(struct cdp_pdev *pdev,
5978 u8 *peer_addr, uint8_t *peer_id,
5979 enum peer_debug_id_type debug_id)
5980{
5981 return ol_txrx_peer_get_ref_by_addr((ol_txrx_pdev_handle)pdev,
5982 peer_addr, peer_id, debug_id);
5983}
5984
5985/**
5986 * ol_txrx_wrapper_peer_release_ref() - release peer reference
5987 * @peer: peer handle
5988 * @debug_id: peer debug id for tracking
5989 *
5990 * Release peer ref acquired by peer get ref api
5991 *
5992 * Return: void
5993 */
5994static void ol_txrx_wrapper_peer_release_ref(void *peer,
5995 enum peer_debug_id_type debug_id)
5996{
5997 ol_txrx_peer_release_ref(peer, debug_id);
5998}
5999
6000/**
Leo Chang98726762016-10-28 11:07:18 -07006001 * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
6002 * @cfg_ctx: cfg context
6003 * @cfg_param: cfg parameters
6004 *
6005 * Return: none
6006 */
Jeff Johnsonffa9afc2016-12-19 15:34:41 -08006007static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08006008ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
6009 void *cfg_param)
Leo Chang98726762016-10-28 11:07:18 -07006010{
6011 return ol_tx_set_flow_control_parameters(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08006012 cfg_pdev,
Leo Chang98726762016-10-28 11:07:18 -07006013 (struct txrx_pdev_cfg_param_t *)cfg_param);
6014}
6015
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08006016#ifdef WDI_EVENT_ENABLE
6017void *ol_get_pldev(struct cdp_pdev *txrx_pdev)
6018{
6019 struct ol_txrx_pdev_t *pdev =
6020 (struct ol_txrx_pdev_t *)txrx_pdev;
6021 if (pdev != NULL)
6022 return pdev->pl_dev;
6023
6024 return NULL;
6025}
6026#endif
6027
Leo Chang98726762016-10-28 11:07:18 -07006028static struct cdp_cmn_ops ol_ops_cmn = {
6029 .txrx_soc_attach_target = ol_txrx_soc_attach_target,
6030 .txrx_vdev_attach = ol_txrx_vdev_attach,
6031 .txrx_vdev_detach = ol_txrx_vdev_detach,
6032 .txrx_pdev_attach = ol_txrx_pdev_attach,
6033 .txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
6034 .txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05306035 .txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
Leo Chang98726762016-10-28 11:07:18 -07006036 .txrx_pdev_detach = ol_txrx_pdev_detach,
Dhanashri Atre272fd232016-11-10 16:20:46 -08006037 .txrx_peer_create = ol_txrx_peer_attach,
6038 .txrx_peer_setup = NULL,
6039 .txrx_peer_teardown = NULL,
6040 .txrx_peer_delete = ol_txrx_peer_detach,
Leo Chang98726762016-10-28 11:07:18 -07006041 .txrx_vdev_register = ol_txrx_vdev_register,
6042 .txrx_soc_detach = ol_txrx_soc_detach,
6043 .txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
6044 .txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
6045 .txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
Krishna Kumaar Natarajan5fb9ac12016-12-06 14:28:35 -08006046 .txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
Leo Chang98726762016-10-28 11:07:18 -07006047 .txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
6048 .txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
6049 .txrx_get_tx_pending = ol_txrx_get_tx_pending,
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07006050 .flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07006051 .txrx_fw_stats_get = ol_txrx_fw_stats_get,
6052 .display_stats = ol_txrx_display_stats,
Leo Chang98726762016-10-28 11:07:18 -07006053 /* TODO: Add other functions */
6054};
6055
6056static struct cdp_misc_ops ol_ops_misc = {
6057 .set_ibss_vdev_heart_beat_timer =
6058 ol_txrx_set_ibss_vdev_heart_beat_timer,
6059#ifdef CONFIG_HL_SUPPORT
6060 .set_wmm_param = ol_txrx_set_wmm_param,
6061#endif /* CONFIG_HL_SUPPORT */
6062 .bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
6063 .bad_peer_txctl_update_threshold =
6064 ol_txrx_bad_peer_txctl_update_threshold,
6065 .hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
6066 .tx_non_std = ol_tx_non_std,
6067 .get_vdev_id = ol_txrx_get_vdev_id,
Alok Kumar75355aa2018-03-19 17:32:58 +05306068 .get_tx_ack_stats = ol_txrx_get_tx_ack_stats,
Leo Chang98726762016-10-28 11:07:18 -07006069 .set_wisa_mode = ol_txrx_set_wisa_mode,
Poddar, Siddarth34872782017-08-10 14:08:51 +05306070 .txrx_data_stall_cb_register = ol_register_data_stall_detect_cb,
6071 .txrx_data_stall_cb_deregister = ol_deregister_data_stall_detect_cb,
Poddar, Siddarthdb568162017-07-27 18:16:38 +05306072 .txrx_post_data_stall_event = ol_txrx_post_data_stall_event,
Leo Chang98726762016-10-28 11:07:18 -07006073#ifdef FEATURE_RUNTIME_PM
6074 .runtime_suspend = ol_txrx_runtime_suspend,
6075 .runtime_resume = ol_txrx_runtime_resume,
6076#endif /* FEATURE_RUNTIME_PM */
6077 .get_opmode = ol_txrx_get_opmode,
6078 .mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
6079 .update_mac_id = ol_txrx_update_mac_id,
6080 .flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
6081 .get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
6082 .pkt_log_init = htt_pkt_log_init,
6083 .pkt_log_con_service = ol_txrx_pkt_log_con_service
6084};
6085
6086static struct cdp_flowctl_ops ol_ops_flowctl = {
6087#ifdef QCA_LL_TX_FLOW_CONTROL_V2
6088 .register_pause_cb = ol_txrx_register_pause_cb,
6089 .set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07006090 .dump_flow_pool_info = ol_tx_dump_flow_pool_info,
Leo Chang98726762016-10-28 11:07:18 -07006091#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
6092};
6093
6094static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
6095#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
6096 .register_tx_flow_control = ol_txrx_register_tx_flow_control,
6097 .deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
6098 .flow_control_cb = ol_txrx_flow_control_cb,
6099 .get_tx_resource = ol_txrx_get_tx_resource,
6100 .ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
6101 .vdev_flush = ol_txrx_vdev_flush,
6102 .vdev_pause = ol_txrx_vdev_pause,
6103 .vdev_unpause = ol_txrx_vdev_unpause
6104#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
6105};
6106
Leo Chang98726762016-10-28 11:07:18 -07006107#ifdef IPA_OFFLOAD
Yun Parkb4f591d2017-03-29 15:51:01 -07006108static struct cdp_ipa_ops ol_ops_ipa = {
Leo Chang98726762016-10-28 11:07:18 -07006109 .ipa_get_resource = ol_txrx_ipa_uc_get_resource,
6110 .ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
6111 .ipa_set_active = ol_txrx_ipa_uc_set_active,
6112 .ipa_op_response = ol_txrx_ipa_uc_op_response,
6113 .ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
6114 .ipa_get_stat = ol_txrx_ipa_uc_get_stat,
6115 .ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
Yun Park637d6482016-10-05 10:51:33 -07006116 .ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base,
Yun Parkb4f591d2017-03-29 15:51:01 -07006117 .ipa_enable_autonomy = ol_txrx_ipa_enable_autonomy,
6118 .ipa_disable_autonomy = ol_txrx_ipa_disable_autonomy,
6119 .ipa_setup = ol_txrx_ipa_setup,
6120 .ipa_cleanup = ol_txrx_ipa_cleanup,
6121 .ipa_setup_iface = ol_txrx_ipa_setup_iface,
6122 .ipa_cleanup_iface = ol_txrx_ipa_cleanup_iface,
6123 .ipa_enable_pipes = ol_txrx_ipa_enable_pipes,
6124 .ipa_disable_pipes = ol_txrx_ipa_disable_pipes,
6125 .ipa_set_perf_level = ol_txrx_ipa_set_perf_level,
6126#ifdef FEATURE_METERING
Yun Park637d6482016-10-05 10:51:33 -07006127 .ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
6128 .ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota
Yun Parkb4f591d2017-03-29 15:51:01 -07006129#endif
Leo Chang98726762016-10-28 11:07:18 -07006130};
Yun Parkb4f591d2017-03-29 15:51:01 -07006131#endif
Leo Chang98726762016-10-28 11:07:18 -07006132
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07006133#ifdef RECEIVE_OFFLOAD
6134static struct cdp_rx_offld_ops ol_rx_offld_ops = {
6135 .register_rx_offld_flush_cb = ol_register_offld_flush_cb,
6136 .deregister_rx_offld_flush_cb = ol_deregister_offld_flush_cb
6137};
6138#endif
6139
Leo Chang98726762016-10-28 11:07:18 -07006140static struct cdp_bus_ops ol_ops_bus = {
6141 .bus_suspend = ol_txrx_bus_suspend,
6142 .bus_resume = ol_txrx_bus_resume
6143};
6144
6145static struct cdp_ocb_ops ol_ops_ocb = {
6146 .set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
6147 .get_ocb_chan_info = ol_txrx_get_ocb_chan_info
6148};
6149
6150static struct cdp_throttle_ops ol_ops_throttle = {
Jeff Johnsonb13a5012016-12-21 08:41:16 -08006151#ifdef QCA_SUPPORT_TX_THROTTLE
Leo Chang98726762016-10-28 11:07:18 -07006152 .throttle_init_period = ol_tx_throttle_init_period,
6153 .throttle_set_level = ol_tx_throttle_set_level
Jeff Johnsonb13a5012016-12-21 08:41:16 -08006154#endif /* QCA_SUPPORT_TX_THROTTLE */
Leo Chang98726762016-10-28 11:07:18 -07006155};
6156
6157static struct cdp_mob_stats_ops ol_ops_mob_stats = {
Leo Chang98726762016-10-28 11:07:18 -07006158 .clear_stats = ol_txrx_clear_stats,
6159 .stats = ol_txrx_stats
6160};
6161
6162static struct cdp_cfg_ops ol_ops_cfg = {
6163 .set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
6164 .set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
6165 .cfg_attach = ol_pdev_cfg_attach,
6166 .vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
6167 .is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
6168 .tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
6169 .is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
6170 .set_flow_control_parameters =
6171 ol_txrx_wrapper_set_flow_control_parameters,
6172 .set_flow_steering = ol_set_cfg_flow_steering,
Yu Wang66a250b2017-07-19 11:46:40 +08006173 .set_ptp_rx_opt_enabled = ol_set_cfg_ptp_rx_opt_enabled,
Leo Chang98726762016-10-28 11:07:18 -07006174};
6175
6176static struct cdp_peer_ops ol_ops_peer = {
6177 .register_peer = ol_txrx_wrapper_register_peer,
6178 .clear_peer = ol_txrx_clear_peer,
Mohit Khannab7bec722017-11-10 11:43:44 -08006179 .peer_get_ref_by_addr = ol_txrx_wrapper_peer_get_ref_by_addr,
6180 .peer_release_ref = ol_txrx_wrapper_peer_release_ref,
Leo Chang98726762016-10-28 11:07:18 -07006181 .find_peer_by_addr = ol_txrx_wrapper_find_peer_by_addr,
6182 .find_peer_by_addr_and_vdev = ol_txrx_find_peer_by_addr_and_vdev,
6183 .local_peer_id = ol_txrx_local_peer_id,
6184 .peer_find_by_local_id = ol_txrx_wrapper_peer_find_by_local_id,
6185 .peer_state_update = ol_txrx_wrapper_peer_state_update,
6186 .get_vdevid = ol_txrx_get_vdevid,
6187 .get_vdev_by_sta_id = ol_txrx_get_vdev_by_sta_id,
6188 .register_ocb_peer = ol_txrx_register_ocb_peer,
6189 .peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
6190 .get_peer_state = ol_txrx_get_peer_state,
6191 .get_vdev_for_peer = ol_txrx_get_vdev_for_peer,
6192 .update_ibss_add_peer_num_of_vdev =
6193 ol_txrx_update_ibss_add_peer_num_of_vdev,
6194 .remove_peers_for_vdev = ol_txrx_remove_peers_for_vdev,
6195 .remove_peers_for_vdev_no_lock = ol_txrx_remove_peers_for_vdev_no_lock,
Yu Wang053d3e72017-02-08 18:48:24 +08006196#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
Leo Chang98726762016-10-28 11:07:18 -07006197 .copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
6198 .add_last_real_peer = ol_txrx_add_last_real_peer,
Jeff Johnson2338e1a2016-12-16 15:59:24 -08006199 .is_vdev_restore_last_peer = is_vdev_restore_last_peer,
6200 .update_last_real_peer = ol_txrx_update_last_real_peer,
6201#endif /* CONFIG_HL_SUPPORT */
Leo Chang98726762016-10-28 11:07:18 -07006202 .last_assoc_received = ol_txrx_last_assoc_received,
6203 .last_disassoc_received = ol_txrx_last_disassoc_received,
6204 .last_deauth_received = ol_txrx_last_deauth_received,
Leo Chang98726762016-10-28 11:07:18 -07006205 .peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
6206};
6207
6208static struct cdp_tx_delay_ops ol_ops_delay = {
6209#ifdef QCA_COMPUTE_TX_DELAY
6210 .tx_delay = ol_tx_delay,
6211 .tx_delay_hist = ol_tx_delay_hist,
6212 .tx_packet_count = ol_tx_packet_count,
6213 .tx_set_compute_interval = ol_tx_set_compute_interval
6214#endif /* QCA_COMPUTE_TX_DELAY */
6215};
6216
6217static struct cdp_pmf_ops ol_ops_pmf = {
6218 .get_pn_info = ol_txrx_get_pn_info
6219};
6220
Leo Chang98726762016-10-28 11:07:18 -07006221static struct cdp_ctrl_ops ol_ops_ctrl = {
Hanumanth Reddy Pothula855f7ef2018-02-13 18:32:05 +05306222 .txrx_get_pldev = ol_get_pldev,
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08006223 .txrx_wdi_event_sub = wdi_event_sub,
6224 .txrx_wdi_event_unsub = wdi_event_unsub,
Leo Chang98726762016-10-28 11:07:18 -07006225};
6226
Hanumanth Reddy Pothula855f7ef2018-02-13 18:32:05 +05306227/* WINplatform specific structures */
Leo Chang98726762016-10-28 11:07:18 -07006228static struct cdp_me_ops ol_ops_me = {
6229 /* EMPTY FOR MCL */
6230};
6231
6232static struct cdp_mon_ops ol_ops_mon = {
6233 /* EMPTY FOR MCL */
6234};
6235
6236static struct cdp_host_stats_ops ol_ops_host_stats = {
6237 /* EMPTY FOR MCL */
6238};
6239
6240static struct cdp_wds_ops ol_ops_wds = {
6241 /* EMPTY FOR MCL */
6242};
6243
6244static struct cdp_raw_ops ol_ops_raw = {
6245 /* EMPTY FOR MCL */
6246};
6247
6248static struct cdp_ops ol_txrx_ops = {
6249 .cmn_drv_ops = &ol_ops_cmn,
6250 .ctrl_ops = &ol_ops_ctrl,
6251 .me_ops = &ol_ops_me,
6252 .mon_ops = &ol_ops_mon,
6253 .host_stats_ops = &ol_ops_host_stats,
6254 .wds_ops = &ol_ops_wds,
6255 .raw_ops = &ol_ops_raw,
6256 .misc_ops = &ol_ops_misc,
6257 .cfg_ops = &ol_ops_cfg,
6258 .flowctl_ops = &ol_ops_flowctl,
6259 .l_flowctl_ops = &ol_ops_l_flowctl,
Yun Parkb4f591d2017-03-29 15:51:01 -07006260#ifdef IPA_OFFLOAD
Leo Chang98726762016-10-28 11:07:18 -07006261 .ipa_ops = &ol_ops_ipa,
Yun Parkb4f591d2017-03-29 15:51:01 -07006262#endif
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07006263#ifdef RECEIVE_OFFLOAD
6264 .rx_offld_ops = &ol_rx_offld_ops,
6265#endif
Leo Chang98726762016-10-28 11:07:18 -07006266 .bus_ops = &ol_ops_bus,
6267 .ocb_ops = &ol_ops_ocb,
6268 .peer_ops = &ol_ops_peer,
6269 .throttle_ops = &ol_ops_throttle,
6270 .mob_stats_ops = &ol_ops_mob_stats,
6271 .delay_ops = &ol_ops_delay,
6272 .pmf_ops = &ol_ops_pmf
6273};
6274
Jeff Johnson02c37b42017-01-10 14:49:24 -08006275/*
6276 * Local prototype added to temporarily address warning caused by
6277 * -Wmissing-prototypes. A more correct solution, namely to expose
6278 * a prototype in an appropriate header file, will come later.
6279 */
6280struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
6281 struct ol_if_ops *dp_ol_if_ops);
6282struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
6283 struct ol_if_ops *dp_ol_if_ops)
Leo Chang98726762016-10-28 11:07:18 -07006284{
6285 struct cdp_soc_t *soc = qdf_mem_malloc(sizeof(struct cdp_soc_t));
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07006286
Leo Chang98726762016-10-28 11:07:18 -07006287 if (!soc) {
6288 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6289 "%s: OL SOC memory allocation failed\n", __func__);
6290 return NULL;
6291 }
6292
6293 soc->ops = &ol_txrx_ops;
6294 return soc;
6295}
6296
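/*
 * Illustrative attach sequence (a sketch under assumptions, not part of
 * this file): the platform glue creates the soc first and then goes through
 * the returned ops table for further datapath calls, e.g.
 *
 *	struct cdp_soc_t *soc = ol_txrx_soc_attach(scn_handle, dp_ol_if_ops);
 *
 *	if (soc)
 *		soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
 *
 * scn_handle and dp_ol_if_ops are caller-owned here and purely illustrative.
 */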
6297