/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>              /* uint32_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc,free */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <qdf_debugfs.h>

#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
/* Required for WLAN_FEATURE_FASTPATH */
#include <ce_api.h>
#endif
/* header files for utilities */
#include <cds_queue.h>          /* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_if_athvar.h>

/* header files for HTT API */
#include <ol_htt_api.h>
#include <ol_htt_tx_api.h>

/* header files for our own APIs */
#include <ol_txrx_api.h>
#include <ol_txrx_dbg.h>
#include <cdp_txrx_ocb.h>
#include <ol_txrx_ctrl_api.h>
#include <cdp_txrx_stats.h>
#include <ol_txrx_osif_api.h>
/* header files for our internal definitions */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT, etc. */
#include <wdi_event.h>          /* WDI events */
#include <ol_tx.h>              /* ol_tx_ll */
#include <ol_rx.h>              /* ol_rx_deliver */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
#include <ol_rx_pn.h>           /* ol_rx_pn_check, etc. */
#include <ol_rx_fwd.h>          /* ol_rx_fwd_check, etc. */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
#include <ol_rx_reorder.h>
#include <ol_tx_send.h>         /* ol_tx_discard_target_frms */
#include <ol_tx_desc.h>         /* ol_tx_desc_frame_free */
#include <ol_tx_queue.h>
#include <ol_tx_sched.h>        /* ol_tx_sched_attach, etc. */
#include <ol_txrx.h>
#include <ol_txrx_types.h>
#include <cdp_txrx_flow_ctrl_legacy.h>
#include <cdp_txrx_bus.h>
#include <cdp_txrx_ipa.h>
#include <cdp_txrx_pmf.h>
#include "wma.h"
#include "hif.h"
#include "hif_main.h"
#include <cdp_txrx_peer_ops.h>
#ifndef REMOVE_PKT_LOG
#include "pktlog_ac.h"
#endif
#include <wlan_policy_mgr_api.h>
#include "epping_main.h"
#include <a_types.h>
#include <cdp_txrx_handle.h>
#include "wlan_qct_sys.h"

#include <htt_internal.h>
#include <ol_txrx_ipa.h>
#include "wlan_roam_debug.h"

#define DPT_DEBUGFS_PERMS	(QDF_FILE_USR_READ |	\
				 QDF_FILE_USR_WRITE |	\
				 QDF_FILE_GRP_READ |	\
				 QDF_FILE_OTH_READ)

#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *pdev,
			      uint8_t local_peer_id);
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
				 uint8_t local_peer_id,
				 enum peer_debug_id_type dbg_id);
#endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */
QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
				     uint8_t *peer_mac,
				     enum ol_txrx_peer_state state);
static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *vdev,
					bool val);
int ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
extern void
ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
		      struct ol_tx_wmm_param_t wmm_param);

extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
				uint64_t **last_pn, uint32_t **rmf_pn_replays);

/* thresh for peer's cached buf queue beyond which the elements are dropped */
#define OL_TXRX_CACHED_BUFQ_THRESH 128

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
 * @vdev: the data virtual device
 * @bss_addr: bss address
 *
 * Return: None
 */
static void
ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
	if (bss_addr && vdev->last_real_peer &&
	    !qdf_mem_cmp((u8 *)bss_addr,
			 vdev->last_real_peer->mac_addr.raw,
			 IEEE80211_ADDR_LEN))
		qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
			     vdev->last_real_peer->mac_addr.raw,
			     OL_TXRX_MAC_ADDR_LEN);
	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
}

/**
 * ol_txrx_add_last_real_peer() - add last peer
 * @pdev: the data physical device
 * @vdev: virtual device
 * @peer_id: peer id
 *
 * Return: None
 */
static void
ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
			   struct cdp_vdev *pvdev, uint8_t *peer_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	ol_txrx_peer_handle peer;

	peer = ol_txrx_find_peer_by_addr(
		(struct cdp_pdev *)pdev,
		vdev->hl_tdls_ap_mac_addr.raw,
		peer_id);

	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
	if (!vdev->last_real_peer && peer &&
	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
		vdev->last_real_peer = peer;
	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
}

/**
 * is_vdev_restore_last_peer() - check for vdev last peer
 * @peer: peer object
 *
 * Return: true if last peer is not null
 */
static bool
is_vdev_restore_last_peer(void *ppeer)
{
	struct ol_txrx_peer_t *peer = ppeer;
	struct ol_txrx_vdev_t *vdev;

	vdev = peer->vdev;
	return vdev->last_real_peer && (vdev->last_real_peer == peer);
}

/**
 * ol_txrx_update_last_real_peer() - restore the last real peer for the vdev
 * @pdev: the data physical device
 * @peer: peer device
 * @peer_id: peer id
 * @restore_last_peer: restore last peer flag
 *
 * Return: None
 */
static void
ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *ppeer,
			      uint8_t *peer_id, bool restore_last_peer)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_peer_t *peer = ppeer;
	struct ol_txrx_vdev_t *vdev;

	if (!restore_last_peer)
		return;

	vdev = peer->vdev;
	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
					 vdev->hl_tdls_ap_mac_addr.raw,
					 peer_id);

	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
	if (!vdev->last_real_peer && peer &&
	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
		vdev->last_real_peer = peer;
	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
}
#endif

/**
 * ol_tx_mark_first_wakeup_packet() - set flag to indicate that
 *    fw is compatible for marking first packet after wow wakeup
 * @value: 1 for enabled/ 0 for disabled
 *
 * Return: None
 */
static void ol_tx_mark_first_wakeup_packet(uint8_t value)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err("%s: pdev is NULL\n", __func__);
		return;
	}

	htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
}

u_int16_t
ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
{
	u_int16_t desc_pool_size;
	u_int16_t steady_state_tx_lifetime_ms;
	u_int16_t safety_factor;

	/*
	 * Steady-state tx latency:
	 *     roughly 1-2 ms flight time
	 *   + roughly 1-2 ms prep time,
	 *   + roughly 1-2 ms target->host notification time.
	 * = roughly 6 ms total
	 * Thus, steady state number of frames =
	 * steady state max throughput / frame size * tx latency, e.g.
	 * 1 Gbps / 1500 bytes * 6 ms = 500
	 *
	 */
	steady_state_tx_lifetime_ms = 6;

	safety_factor = 8;

	desc_pool_size =
		ol_cfg_max_thruput_mbps(ctrl_pdev) *
		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
		(8 * OL_TX_AVG_FRM_BYTES) *
		steady_state_tx_lifetime_ms *
		safety_factor;

	/* minimum */
	if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;

	/* maximum */
	if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;

	return desc_pool_size;
}
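
/*
 * Illustrative walk-through of the sizing formula above (added commentary,
 * not part of the original source; the inputs are assumed example values):
 * with ol_cfg_max_thruput_mbps() == 800 and OL_TX_AVG_FRM_BYTES == 1500,
 *   800 * 1000 / (8 * 1500)         ~= 66 frames per ms of tx lifetime,
 *   66 * 6 ms * safety factor of 8  ~= 3200 descriptors,
 * which is then clamped to the [OL_TX_DESC_POOL_SIZE_MIN_HL,
 * OL_TX_DESC_POOL_SIZE_MAX_HL] range.
 */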

/*=== function definitions ===*/

/**
 * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate that mgmt over
 *					  wmi is enabled or not.
 * @value: 1 for enabled/ 0 for disabled
 *
 * Return: None
 */
void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->is_mgmt_over_wmi_enabled = value;
}

/**
 * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
 *
 * Return: is_mgmt_over_wmi_enabled
 */
uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return 0;
	}
	return pdev->is_mgmt_over_wmi_enabled;
}


#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
static void *
ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
		struct cdp_vdev *pvdev, uint8_t *peer_addr, uint8_t *peer_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	return peer;
}

static QDF_STATUS ol_txrx_get_vdevid(void *ppeer, uint8_t *vdev_id)
{
	struct ol_txrx_peer_t *peer = ppeer;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "peer argument is null!!");
		return QDF_STATUS_E_FAILURE;
	}

	*vdev_id = peer->vdev->vdev_id;
	return QDF_STATUS_SUCCESS;
}

static struct cdp_vdev *ol_txrx_get_vdev_by_sta_id(struct cdp_pdev *ppdev,
						   uint8_t sta_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_peer_t *peer = NULL;
	ol_txrx_vdev_handle vdev;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PDEV not found for sta_id [%d]", sta_id);
		return NULL;
	}

	peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev,
						sta_id,
						PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PEER [%d] not found", sta_id);
		return NULL;
	}

	vdev = peer->vdev;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);

	return (struct cdp_vdev *)vdev;
}

/**
 * ol_txrx_find_peer_by_addr() - find peer via peer mac addr and peer_id
 * @ppdev: pointer of type cdp_pdev
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds a peer with the given mac address and returns its
 * peer_id. Note that this function does not increment the peer->ref_cnt.
 * This means that the peer may be deleted in some other parallel context
 * after it has been found.
 *
 * Return: peer handle if peer is found, NULL if peer is not found.
 */
void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
				uint8_t *peer_addr,
				uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
						   PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	return peer;
}

/**
 * ol_txrx_peer_get_ref_by_addr() - get peer ref via peer mac addr and peer_id
 * @pdev: pointer of type ol_txrx_pdev_handle
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds the peer with the given mac address and returns its
 * peer_id. Note that this function increments the peer->ref_cnt.
 * This makes sure that the peer remains valid. This also means the caller
 * needs to call the corresponding API - ol_txrx_peer_release_ref - to drop
 * the peer reference.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr, peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
						 u8 *peer_addr,
						 u8 *peer_id,
						 enum peer_debug_id_type dbg_id)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
						   dbg_id);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	return peer;
}

static uint16_t ol_txrx_local_peer_id(void *ppeer)
{
	ol_txrx_peer_handle peer = ppeer;

	return peer->local_id;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @return handle to the txrx peer object
 */
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *ppdev,
			      uint8_t local_peer_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_peer_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	return peer;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @dbg_id - debug_id to track caller
 * @return handle to the txrx peer object
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * Note that this function increments the peer->ref_cnt.
 * This makes sure that the peer remains valid. This also means the caller
 * needs to call the corresponding API - ol_txrx_peer_release_ref - to drop
 * the peer reference.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_local_id(pdev, local_peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
				 uint8_t local_peer_id,
				 enum peer_debug_id_type dbg_id)
{
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_peer_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	if (peer && peer->valid)
		ol_txrx_peer_get_ref(peer, dbg_id);
	else
		peer = NULL;
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);

	return peer;
}

static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
}
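
/*
 * Illustrative note (added commentary, not part of the original source):
 * the local peer ID allocator is an array-based singly linked freelist,
 * where pool[i] holds the index of the next free ID. Right after this
 * init, assuming OL_TXRX_NUM_LOCAL_PEER_IDS == 4 purely for illustration:
 *
 *	freelist = 0,  pool[] = { 1, 2, 3, 4, 4 }
 *
 * ol_txrx_local_peer_id_alloc() pops the head ID and advances freelist to
 * pool[head]; ol_txrx_local_peer_id_free() pushes the ID back on the head.
 * The final slot pointing to itself is the end-of-list marker that the
 * alloc path checks to detect an empty pool.
 */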

static void
ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}
	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
}

#else
#define ol_txrx_local_peer_id_pool_init(pdev)   /* no-op */
#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
#define ol_txrx_local_peer_id_free(pdev, peer)  /* no-op */
#define ol_txrx_local_peer_id_cleanup(pdev)     /* no-op */
#endif

#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

/**
 * ol_txrx_update_group_credit() - update group credit for tx queue
 * @group: for which credit needs to be updated
 * @credit: credits
 * @absolute: TXQ group absolute
 *
 * Return: None
 */
void ol_txrx_update_group_credit(
		struct ol_tx_queue_group_t *group,
		int32_t credit,
		u_int8_t absolute)
{
	if (absolute)
		qdf_atomic_set(&group->credit, credit);
	else
		qdf_atomic_add(credit, &group->credit);
}

/**
 * ol_txrx_update_tx_queue_groups() - update vdev tx queue group if
 *				      vdev id mask and ac mask is not matching
 * @pdev: the data physical device
 * @group_id: TXQ group id
 * @credit: TXQ group credit count
 * @absolute: TXQ group absolute
 * @vdev_id_mask: TXQ vdev group id mask
 * @ac_mask: TXQ access category mask
 *
 * Return: None
 */
void ol_txrx_update_tx_queue_groups(
		ol_txrx_pdev_handle pdev,
		u_int8_t group_id,
		int32_t credit,
		u_int8_t absolute,
		u_int32_t vdev_id_mask,
		u_int32_t ac_mask
		)
{
	struct ol_tx_queue_group_t *group;
	u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
	u_int32_t membership;
	struct ol_txrx_vdev_t *vdev;

	if (group_id >= OL_TX_MAX_TXQ_GROUPS) {
		ol_txrx_warn("%s: invalid group_id=%u, ignore update.\n",
			     __func__,
			     group_id);
		return;
	}

	group = &pdev->txq_grps[group_id];

	membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	/*
	 * if the membership (vdev id mask and ac mask)
	 * matches then no need to update tx queue groups.
	 */
	if (group->membership == membership)
		/* Update Credit Only */
		goto credit_update;


	/*
	 * membership (vdev id mask and ac mask) is not matching
	 * TODO: ignoring ac mask for now
	 */
	group_vdev_id_mask =
		OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		group_vdev_bit_mask =
			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
					group_vdev_id_mask, vdev->vdev_id);
		vdev_bit_mask =
			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
					vdev_id_mask, vdev->vdev_id);

		if (group_vdev_bit_mask != vdev_bit_mask) {
			/*
			 * Change in vdev tx queue group
			 */
			if (!vdev_bit_mask) {
				/* Set Group Pointer (vdev and peer) to NULL */
				ol_tx_set_vdev_group_ptr(
						pdev, vdev->vdev_id, NULL);
			} else {
				/* Set Group Pointer (vdev and peer) */
				ol_tx_set_vdev_group_ptr(
						pdev, vdev->vdev_id, group);
			}
		}
	}
	/* Update membership */
	group->membership = membership;
credit_update:
	/* Update Credit */
	ol_txrx_update_group_credit(group, credit, absolute);
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
}
#endif
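
/*
 * Illustrative note (added commentary, not part of the original source):
 * a group's membership word packs the vdev id mask and ac mask via
 * OL_TXQ_GROUP_MEMBERSHIP_GET(). Assuming, for example, that the target
 * removes vdev 1 from a group by reporting vdev_id_mask = 0x1 while the
 * group previously held 0x3, the per-vdev bits extracted by
 * OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET() differ for vdev 1, so the loop in
 * ol_txrx_update_tx_queue_groups() clears that vdev's (and its peers')
 * queue-group pointer before the credit itself is updated.
 */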

#ifdef WLAN_FEATURE_FASTPATH
/**
 * setup_fastpath_ce_handles() - update pdev with ce_handle for fastpath use
 *
 * @osc: pointer to HIF context
 * @pdev: pointer to ol pdev
 *
 * Return: void
 */
static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
					     struct ol_txrx_pdev_t *pdev)
{
	/*
	 * Before the HTT attach, set up the CE handles
	 * CE handles are (struct CE_state *)
	 * This is only required in the fast path
	 */
	pdev->ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_H2T_MSG);

}

#else /* not WLAN_FEATURE_FASTPATH */
static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
					     struct ol_txrx_pdev_t *pdev)
{
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_set_desc_global_pool_size() - set global pool size
 * @num_msdu_desc: total number of descriptors
 *
 * Return: none
 */
static void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->num_msdu_desc = num_msdu_desc;
	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
		pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
	ol_txrx_info_high("Global pool size: %d\n",
			  pdev->num_msdu_desc);
}

/**
 * ol_tx_get_desc_global_pool_size() - get global pool size
 * @pdev: pdev handle
 *
 * Return: global pool size
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return pdev->num_msdu_desc;
}

/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
static inline
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
	struct ol_tx_flow_pool_t *pool = NULL;
	uint32_t free_desc;

	free_desc = pdev->tx_desc.num_free;
	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		free_desc += pool->avail_desc;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	}
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return free_desc;
}

#else
/**
 * ol_tx_get_desc_global_pool_size() - get global pool size
 * @pdev: pdev handle
 *
 * Return: global pool size
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
}

/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
static inline
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
	return pdev->tx_desc.num_free;
}

#endif

#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)

/**
 * ol_txrx_rsrc_threshold_lo() - set threshold low - when to start tx desc
 *				 margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold low
 */
static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;

	/*
	 * 5% margin of unallocated desc is too much for per
	 * vdev mechanism.
	 * Define the value separately.
	 */
	threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;

	return threshold_low;
}

/**
 * ol_txrx_rsrc_threshold_hi() - set threshold high - where to stop
 *				 during tx desc margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold high
 */
static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/* when freeing up descriptors,
	 * keep going until there's a 7.5% margin
	 */
	threshold_high = ((15 * desc_pool_size) / 100) / 2;

	return threshold_high;
}
#else

static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;
	/* always maintain a 5% margin of unallocated descriptors */
	threshold_low = (5 * desc_pool_size) / 100;

	return threshold_low;
}

static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/* when freeing up descriptors, keep going until
	 * there's a 15% margin
	 */
	threshold_high = (15 * desc_pool_size) / 100;

	return threshold_high;
}
#endif
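
/*
 * Illustrative example (added commentary, not part of the original source):
 * for an assumed desc_pool_size of 1024 in the default (non per-vdev pool)
 * build above, descriptor margin replenishment starts once fewer than
 * (5 * 1024) / 100 = 51 descriptors remain unallocated, and keeps going
 * until (15 * 1024) / 100 = 153 descriptors are free again.
 */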

#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
 * @pdev: the physical device object
 *
 * Return: None
 */
static void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->txq_log_spinlock);
	pdev->txq_log.size = OL_TXQ_LOG_SIZE;
	pdev->txq_log.oldest_record_offset = 0;
	pdev->txq_log.offset = 0;
	pdev->txq_log.allow_wrap = 1;
	pdev->txq_log.wrapped = 0;
}

/**
 * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->txq_log_spinlock);
}

#else

static inline void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
}

static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
}


#endif

#if defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->grp_stat_spinlock);
	pdev->grp_stats.last_valid_index = -1;
	pdev->grp_stats.wrap_around = 0;
}

/**
 * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
}
#else

static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
}

static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
}
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
 * @vdev: the virtual device object
 * @flag: flag
 *
 * Return: None
 */
void
ol_txrx_hl_tdls_flag_reset(struct cdp_vdev *pvdev, bool flag)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	vdev->hlTdlsFlag = flag;
}
#endif

#if defined(CONFIG_HL_SUPPORT)

/**
 * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
	u_int8_t i;

	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		TAILQ_INIT(&vdev->txqs[i].head);
		vdev->txqs[i].paused_count.total = 0;
		vdev->txqs[i].frms = 0;
		vdev->txqs[i].bytes = 0;
		vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
		vdev->txqs[i].flag = ol_tx_queue_empty;
		/* aggregation is not applicable for vdev tx queues */
		vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
		ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
		ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
	}
}

/**
 * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_frms_queue_t *txq;
	int i;

	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		txq = &vdev->txqs[i];
		ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
	}
}

/**
 * ol_txrx_peer_txqs_init() - initialise peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
static void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
		       struct ol_txrx_peer_t *peer)
{
	uint8_t i;
	struct ol_txrx_vdev_t *vdev = peer->vdev;

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		TAILQ_INIT(&peer->txqs[i].head);
		peer->txqs[i].paused_count.total = 0;
		peer->txqs[i].frms = 0;
		peer->txqs[i].bytes = 0;
		peer->txqs[i].ext_tid = i;
		peer->txqs[i].flag = ol_tx_queue_empty;
		peer->txqs[i].aggr_state = ol_tx_aggr_untried;
		ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
		ol_txrx_set_txq_peer(&peer->txqs[i], peer);
	}
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);

	/* aggregation is not applicable for mgmt and non-QoS tx queues */
	for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
		peer->txqs[i].aggr_state = ol_tx_aggr_disabled;

	ol_txrx_peer_pause(peer);
}

/**
 * ol_txrx_peer_tx_queue_free() - free peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
static void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	struct ol_tx_frms_queue_t *txq;
	uint8_t i;

	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		txq = &peer->txqs[i];
		ol_tx_queue_free(pdev, txq, i, true);
	}
}
#else

static inline void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
}

static inline void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
}

static inline void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
		       struct ol_txrx_peer_t *peer)
{
}

static inline void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
}
#endif

#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
{
	qdf_spinlock_create(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
{
	qdf_spinlock_destroy(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
	int msdu_idx;
	int seg_idx;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "TSO Statistics:");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "TSO pkts %lld, bytes %lld\n",
		  pdev->stats.pub.tx.tso.tso_pkts.pkts,
		  pdev->stats.pub.tx.tso.tso_pkts.bytes);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "TSO Histogram for numbers of segments:\n"
		  "Single segment	%d\n"
		  "  2-5 segments	%d\n"
		  " 6-10 segments	%d\n"
		  "11-15 segments	%d\n"
		  "16-20 segments	%d\n"
		  "  20+ segments	%d\n",
		  pdev->stats.pub.tx.tso.tso_hist.pkts_1,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "TSO History Buffer: Total size %d, current_index %d",
		  NUM_MAX_TSO_MSDUS,
		  TXRX_STATS_TSO_MSDU_IDX(pdev));

	for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
		if (TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx) == 0)
			continue;
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
			  "jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
			  msdu_idx,
			  TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
			  TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
			  TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
			  TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));

		for (seg_idx = 0;
		     ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev,
		       msdu_idx)) && (seg_idx < NUM_MAX_TSO_SEGS));
		     seg_idx++) {
			struct qdf_tso_seg_t tso_seg =
				TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "seg idx: %d", seg_idx);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "tso_enable: %d",
				  tso_seg.tso_flags.tso_enable);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
				  tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
				  tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
				  tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
				  tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
				  tso_seg.tso_flags.ns);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "tcp_seq_num: 0x%x ip_id: %d",
				  tso_seg.tso_flags.tcp_seq_num,
				  tso_seg.tso_flags.ip_id);
		}
	}
}

static void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
{
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_pkts,
		     sizeof(struct ol_txrx_stats_elem));
#if defined(FEATURE_TSO)
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_info,
		     sizeof(struct ol_txrx_stats_tso_info));
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_hist,
		     sizeof(struct ol_txrx_tso_histogram));
#endif
}

#else

static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "TSO is not supported\n");
}

static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
{
	/*
	 * Keep the body empty and do not add an error print, as the print
	 * would show up every time during driver load if TSO is not enabled.
	 */
}

static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
{
	/*
	 * Keep the body empty and do not add an error print, as the print
	 * would show up every time during driver unload if TSO is not enabled.
	 */
}

static void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
{
	/*
	 * Keep the body empty and do not add an error print, as the print
	 * would show up every time during driver unload if TSO is not enabled.
	 */
}
#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */

#if defined(CONFIG_DP_TRACE) && defined(WLAN_DEBUGFS)
/**
 * ol_txrx_read_dpt_buff_debugfs() - read dp trace buffer
 * @file: file to read
 * @arg: pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_read_dpt_buff_debugfs(qdf_debugfs_file_t file,
						void *arg)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)arg;
	uint32_t i = 0;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID)
		return QDF_STATUS_E_INVAL;
	else if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE) {
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
		return QDF_STATUS_SUCCESS;
	}

	i = qdf_dpt_get_curr_pos_debugfs(file, pdev->state);
	status = qdf_dpt_dump_stats_debugfs(file, i);
	if (status == QDF_STATUS_E_FAILURE)
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS;
	else if (status == QDF_STATUS_SUCCESS)
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE;

	return status;
}

/**
 * ol_txrx_write_dpt_buff_debugfs() - set dp trace parameters
 * @priv: pdev object
 * @buf: buff to get value for dpt parameters
 * @len: buf length
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_write_dpt_buff_debugfs(void *priv,
						 const char *buf,
						 qdf_size_t len)
{
	return QDF_STATUS_SUCCESS;
}

static int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
	pdev->dpt_debugfs_fops.show = ol_txrx_read_dpt_buff_debugfs;
	pdev->dpt_debugfs_fops.write = ol_txrx_write_dpt_buff_debugfs;
	pdev->dpt_debugfs_fops.priv = pdev;

	pdev->dpt_stats_log_dir = qdf_debugfs_create_dir("dpt_stats", NULL);

	if (!pdev->dpt_stats_log_dir) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: error while creating debugfs dir for %s",
			  __func__, "dpt_stats");
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
		return -EBUSY;
	}

	if (!qdf_debugfs_create_file("dump_set_dpt_logs", DPT_DEBUGFS_PERMS,
				     pdev->dpt_stats_log_dir,
				     &pdev->dpt_debugfs_fops)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: debug Entry creation failed!",
			  __func__);
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
		return -EBUSY;
	}

	pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
	return 0;
}

static void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
	qdf_debugfs_remove_dir_recursive(pdev->dpt_stats_log_dir);
}
#else
static inline int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
	return 0;
}

static inline void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
}
#endif
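
/*
 * Illustrative note (added commentary, not part of the original source):
 * the debugfs "show" handler above is invoked repeatedly until the whole
 * DP trace buffer has been emitted, so pdev->state appears to cycle
 *   SHOW_STATE_INIT -> SHOW_IN_PROGRESS (qdf_dpt_dump_stats_debugfs()
 *   returned QDF_STATUS_E_FAILURE, i.e. more records remain)
 *   -> SHOW_COMPLETE -> SHOW_STATE_INIT again on the next read,
 * while SHOW_STATE_INVALID (set when debugfs creation fails in
 * ol_txrx_debugfs_init()) makes every read return QDF_STATUS_E_INVAL.
 */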
1341
Rakshith Suresh Patkar44f6a8f2018-04-17 16:17:12 +05301342/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08001343 * ol_txrx_pdev_attach() - allocate txrx pdev
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001344 * @ctrl_pdev: cfg pdev
1345 * @htc_pdev: HTC pdev
1346 * @osdev: os dev
1347 *
1348 * Return: txrx pdev handle
1349 * NULL for failure
1350 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001351static struct cdp_pdev *
1352ol_txrx_pdev_attach(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
Leo Chang98726762016-10-28 11:07:18 -07001353 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001354{
1355 struct ol_txrx_pdev_t *pdev;
hqufd227fe2017-06-26 17:01:14 +08001356 int i, tid;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001357
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301358 pdev = qdf_mem_malloc(sizeof(*pdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001359 if (!pdev)
1360 goto fail0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001361
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301362 /* init LL/HL cfg here */
1363 pdev->cfg.is_high_latency = ol_cfg_is_high_latency(ctrl_pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001364 pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(ctrl_pdev);
1365
1366 /* store provided params */
1367 pdev->ctrl_pdev = ctrl_pdev;
1368 pdev->osdev = osdev;
1369
1370 for (i = 0; i < htt_num_sec_types; i++)
1371 pdev->sec_types[i] = (enum ol_sec_type)i;
1372
1373 TXRX_STATS_INIT(pdev);
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301374 ol_txrx_tso_stats_init(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001375
1376 TAILQ_INIT(&pdev->vdev_list);
1377
tfyu9fcabd72017-09-26 17:46:48 +08001378 TAILQ_INIT(&pdev->req_list);
1379 pdev->req_list_depth = 0;
1380 qdf_spinlock_create(&pdev->req_list_spinlock);
1381
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001382 /* do initial set up of the peer ID -> peer object lookup map */
1383 if (ol_txrx_peer_find_attach(pdev))
1384 goto fail1;
1385
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301386 /* initialize the counter of the target's tx buffer availability */
1387 qdf_atomic_init(&pdev->target_tx_credit);
1388 qdf_atomic_init(&pdev->orig_target_tx_credit);
1389
1390 if (ol_cfg_is_high_latency(ctrl_pdev)) {
1391 qdf_spinlock_create(&pdev->tx_queue_spinlock);
1392 pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
1393 if (pdev->tx_sched.scheduler == NULL)
1394 goto fail2;
1395 }
1396 ol_txrx_pdev_txq_log_init(pdev);
1397 ol_txrx_pdev_grp_stats_init(pdev);
1398
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001399 pdev->htt_pdev =
1400 htt_pdev_alloc(pdev, ctrl_pdev, htc_pdev, osdev);
1401 if (!pdev->htt_pdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301402 goto fail3;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001403
Himanshu Agarwalf65bd4c2016-12-05 17:21:12 +05301404 htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
1405 ol_rx_pkt_dump_call);
hqufd227fe2017-06-26 17:01:14 +08001406
1407 /*
1408 * Init the tid --> category table.
1409 * Regular tids (0-15) map to their AC.
1410 * Extension tids get their own categories.
1411 */
1412 for (tid = 0; tid < OL_TX_NUM_QOS_TIDS; tid++) {
1413 int ac = TXRX_TID_TO_WMM_AC(tid);
1414
1415 pdev->tid_to_ac[tid] = ac;
1416 }
1417 pdev->tid_to_ac[OL_TX_NON_QOS_TID] =
1418 OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;
1419 pdev->tid_to_ac[OL_TX_MGMT_TID] =
1420 OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
1421 pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST] =
1422 OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
1423 pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
1424 OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;
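 /*
  * Reading aid (an assumption based on the usual WMM convention, not on
  * the TXRX_TID_TO_WMM_AC definition itself): TIDs 0/3 map to the
  * best-effort category, 1/2 to background, 4/5 to video and 6/7 to
  * voice, while the non-QoS, management and mcast/bcast TIDs use the
  * dedicated categories assigned above.
  */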
1425
Rakshith Suresh Patkar44f6a8f2018-04-17 16:17:12 +05301426 ol_txrx_debugfs_init(pdev);
1427
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001428 return (struct cdp_pdev *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001429
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301430fail3:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001431 ol_txrx_peer_find_detach(pdev);
1432
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301433fail2:
1434 if (ol_cfg_is_high_latency(ctrl_pdev))
1435 qdf_spinlock_destroy(&pdev->tx_queue_spinlock);
1436
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001437fail1:
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301438 ol_txrx_tso_stats_deinit(pdev);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301439 qdf_mem_free(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001440
1441fail0:
1442 return NULL;
1443}
1444
Komal Seelamc4b28632016-02-03 15:02:18 +05301445#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
1446/**
1447 * htt_pkt_log_init() - API to initialize packet log
1448 * @ppdev: physical (pdev) device handle
1449 * @scn: HIF context
1450 *
1451 * Return: void
1452 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001453void htt_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
Komal Seelamc4b28632016-02-03 15:02:18 +05301454{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001455 struct ol_txrx_pdev_t *handle = (struct ol_txrx_pdev_t *)ppdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001456
Komal Seelamc4b28632016-02-03 15:02:18 +05301457 if (handle->pkt_log_init)
1458 return;
1459
Anurag Chouhandf2b2682016-02-29 14:15:27 +05301460 if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
Houston Hoffman371d4a92016-04-14 17:02:37 -07001461 !QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
Venkata Sharath Chandra Manchala1240fc72017-10-26 17:32:29 -07001462 pktlog_sethandle(&handle->pl_dev, scn);
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08001463 pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
Komal Seelamc4b28632016-02-03 15:02:18 +05301464 if (pktlogmod_init(scn))
Anurag Chouhandf2b2682016-02-29 14:15:27 +05301465 qdf_print("%s: pktlogmod_init failed", __func__);
Komal Seelamc4b28632016-02-03 15:02:18 +05301466 else
1467 handle->pkt_log_init = true;
1468 }
1469}
1470
1471/**
1472 * htt_pktlogmod_exit() - API to cleanup pktlog info
1473 * @handle: Pdev handle
1475 *
1476 * Return: void
1477 */
Houston Hoffman8c485042017-02-08 13:40:21 -08001478static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
Komal Seelamc4b28632016-02-03 15:02:18 +05301479{
Houston Hoffman8c485042017-02-08 13:40:21 -08001480 if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
Houston Hoffman371d4a92016-04-14 17:02:37 -07001481 !QDF_IS_EPPING_ENABLED(cds_get_conparam()) &&
Komal Seelamc4b28632016-02-03 15:02:18 +05301482 handle->pkt_log_init) {
Houston Hoffman8c485042017-02-08 13:40:21 -08001483 pktlogmod_exit(handle);
Komal Seelamc4b28632016-02-03 15:02:18 +05301484 handle->pkt_log_init = false;
1485 }
1486}
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001487
Komal Seelamc4b28632016-02-03 15:02:18 +05301488#else
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001489void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *ol_sc) { }
Houston Hoffman8c485042017-02-08 13:40:21 -08001490static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
Komal Seelamc4b28632016-02-03 15:02:18 +05301491#endif
1492
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001493/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08001494 * ol_txrx_pdev_post_attach() - attach txrx pdev
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001495 * @ppdev: txrx pdev handle
1496 *
1497 * Return: 0 for success
1498 */
1499int
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001500ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001501{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001502 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001503 uint16_t i;
1504 uint16_t fail_idx = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001505 int ret = 0;
1506 uint16_t desc_pool_size;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301507 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001508
Leo Chang376398b2015-10-23 14:19:02 -07001509 uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
1510 union ol_tx_desc_list_elem_t *c_element;
1511 unsigned int sig_bit;
1512 uint16_t desc_per_page;
1513
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001514 if (!osc) {
1515 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001516 goto ol_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001517 }
1518
1519 /*
1520 * For LL, limit the number of host's tx descriptors to match
1521 * the number of target FW tx descriptors.
1522 * This simplifies the FW, by ensuring the host will never
1523 * download more tx descriptors than the target has space for.
1524 * The FW will drop/free low-priority tx descriptors when it
1525 * starts to run low, so that in theory the host should never
1526 * run out of tx descriptors.
1527 */
1528
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001529 /*
1530 * LL - initialize the target credit ourselves.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301531 * HL - wait for a HTT target credit initialization
1532 * during htt_attach.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001533 */
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301534 if (pdev->cfg.is_high_latency) {
1535 desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001536
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301537 qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
1538 qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001539
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301540 pdev->tx_queue.rsrc_threshold_lo =
1541 ol_txrx_rsrc_threshold_lo(desc_pool_size);
1542 pdev->tx_queue.rsrc_threshold_hi =
1543 ol_txrx_rsrc_threshold_hi(desc_pool_size);
1544
1545 for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++)
1546 qdf_atomic_init(&pdev->txq_grps[i].credit);
1547
1548 ol_tx_target_credit_init(pdev, desc_pool_size);
1549 } else {
1550 qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
1551 &pdev->target_tx_credit);
1552 desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
1553 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001554
Nirav Shah76291962016-04-25 10:50:37 +05301555 ol_tx_desc_dup_detect_init(pdev, desc_pool_size);
1556
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001557 setup_fastpath_ce_handles(osc, pdev);
1558
1559 ret = htt_attach(pdev->htt_pdev, desc_pool_size);
1560 if (ret)
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301561 goto htt_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001562
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001563 /* Attach micro controller data path offload resource */
Yun Parkf01f6e22017-01-18 17:27:02 -08001564 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
1565 ret = htt_ipa_uc_attach(pdev->htt_pdev);
1566 if (ret)
Leo Chang376398b2015-10-23 14:19:02 -07001567 goto uc_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001568 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001569
Leo Chang376398b2015-10-23 14:19:02 -07001570 /* Calculate single element reserved size power of 2 */
Anurag Chouhanc5548422016-02-24 18:33:27 +05301571 pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301572 qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
Leo Chang376398b2015-10-23 14:19:02 -07001573 pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
1574 if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
1575 (NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301576 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001577 "Page alloc fail");
Yun Parkf01f6e22017-01-18 17:27:02 -08001578 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001579 goto page_alloc_fail;
1580 }
1581 desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
1582 pdev->tx_desc.offset_filter = desc_per_page - 1;
1583 /* Calculate page divider to find page number */
1584 sig_bit = 0;
1585 while (desc_per_page) {
1586 sig_bit++;
1587 desc_per_page = desc_per_page >> 1;
1588 }
1589 pdev->tx_desc.page_divider = (sig_bit - 1);
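 /*
  * Worked example (illustrative only): with 64 descriptors per page the
  * loop above yields sig_bit = 7, so page_divider = 6 and
  * offset_filter = 0x3f.  Assuming the lookup uses these the obvious
  * way, descriptor index i then splits into page (i >> 6) and offset
  * within the page (i & 0x3f).
  */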
Srinivas Girigowdab8ecec22017-03-09 15:02:59 -08001590 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Leo Chang376398b2015-10-23 14:19:02 -07001591 "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
1592 pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
1593 desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
1594 pdev->tx_desc.desc_pages.num_element_per_page);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001595
1596 /*
1597 * Each SW tx desc (used only within the tx datapath SW) has a
1598 * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
1599 * Go ahead and allocate the HTT tx desc and link it with the SW tx
1600 * desc now, to avoid doing it during time-critical transmit.
1601 */
1602 pdev->tx_desc.pool_size = desc_pool_size;
Leo Chang376398b2015-10-23 14:19:02 -07001603 pdev->tx_desc.freelist =
1604 (union ol_tx_desc_list_elem_t *)
1605 (*pdev->tx_desc.desc_pages.cacheable_pages);
1606 c_element = pdev->tx_desc.freelist;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001607 for (i = 0; i < desc_pool_size; i++) {
1608 void *htt_tx_desc;
Leo Chang376398b2015-10-23 14:19:02 -07001609 void *htt_frag_desc = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301610 qdf_dma_addr_t frag_paddr = 0;
1611 qdf_dma_addr_t paddr;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001612
Leo Chang376398b2015-10-23 14:19:02 -07001613 if (i == (desc_pool_size - 1))
1614 c_element->next = NULL;
1615 else
1616 c_element->next = (union ol_tx_desc_list_elem_t *)
1617 ol_tx_desc_find(pdev, i + 1);
1618
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001619 htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001620 if (!htt_tx_desc) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301621 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001622 "%s: failed to alloc HTT tx desc (%d of %d)",
1623 __func__, i, desc_pool_size);
Leo Chang376398b2015-10-23 14:19:02 -07001624 fail_idx = i;
Yun Parkf01f6e22017-01-18 17:27:02 -08001625 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001626 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001627 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001628
Leo Chang376398b2015-10-23 14:19:02 -07001629 c_element->tx_desc.htt_tx_desc = htt_tx_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001630 c_element->tx_desc.htt_tx_desc_paddr = paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001631 ret = htt_tx_frag_alloc(pdev->htt_pdev,
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001632 i, &frag_paddr, &htt_frag_desc);
Leo Chang376398b2015-10-23 14:19:02 -07001633 if (ret) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301634 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001635 "%s: failed to alloc HTT frag dsc (%d/%d)",
1636 __func__, i, desc_pool_size);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001637 /* Is there a leak here, is this handling correct? */
Leo Chang376398b2015-10-23 14:19:02 -07001638 fail_idx = i;
1639 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001640 }
Leo Chang376398b2015-10-23 14:19:02 -07001641 if (!ret && htt_frag_desc) {
Yun Parkeaea8632017-04-09 09:53:45 -07001642 /*
1643 * Initialize the first 6 words (TSO flags)
1644 * of the frag descriptor
1645 */
Leo Chang376398b2015-10-23 14:19:02 -07001646 memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
1647 c_element->tx_desc.htt_frag_desc = htt_frag_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001648 c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001649 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001650#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
Leo Chang376398b2015-10-23 14:19:02 -07001651 c_element->tx_desc.pkt_type = 0xff;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001652#ifdef QCA_COMPUTE_TX_DELAY
Leo Chang376398b2015-10-23 14:19:02 -07001653 c_element->tx_desc.entry_timestamp_ticks =
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001654 0xffffffff;
1655#endif
1656#endif
Leo Chang376398b2015-10-23 14:19:02 -07001657 c_element->tx_desc.id = i;
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301658 qdf_atomic_init(&c_element->tx_desc.ref_cnt);
Leo Chang376398b2015-10-23 14:19:02 -07001659 c_element = c_element->next;
1660 fail_idx = i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001661 }
1662
1663 /* link SW tx descs into a freelist */
1664 pdev->tx_desc.num_free = desc_pool_size;
Poddar, Siddarth14521792017-03-14 21:19:42 +05301665 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001666 "%s first tx_desc:0x%pK Last tx desc:0x%pK\n", __func__,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001667 (uint32_t *) pdev->tx_desc.freelist,
1668 (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001669
1670 /* check what format of frames are expected to be delivered by the OS */
1671 pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
1672 if (pdev->frame_format == wlan_frm_fmt_native_wifi)
1673 pdev->htt_pkt_type = htt_pkt_type_native_wifi;
1674 else if (pdev->frame_format == wlan_frm_fmt_802_3) {
1675 if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
1676 pdev->htt_pkt_type = htt_pkt_type_eth2;
1677 else
1678 pdev->htt_pkt_type = htt_pkt_type_ethernet;
1679 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301680 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001681 "%s Invalid standard frame type: %d",
1682 __func__, pdev->frame_format);
Yun Parkf01f6e22017-01-18 17:27:02 -08001683 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001684 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001685 }
1686
1687 /* setup the global rx defrag waitlist */
1688 TAILQ_INIT(&pdev->rx.defrag.waitlist);
1689
1690 /* configure where defrag timeout and duplicate detection is handled */
1691 pdev->rx.flags.defrag_timeout_check =
1692 pdev->rx.flags.dup_check =
1693 ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);
1694
1695#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
1696 /* Need to revisit this part. Currently, hardcoded to Riva's caps */
1697 pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
1698 pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
1699 /*
1700 * The Riva HW de-aggregate doesn't have capability to generate 802.11
1701 * header for non-first subframe of A-MSDU.
1702 */
1703 pdev->sw_subfrm_hdr_recovery_enable = 1;
1704 /*
1705 * The Riva HW doesn't have the capability to set Protected Frame bit
1706 * in the MAC header for encrypted data frame.
1707 */
1708 pdev->sw_pf_proc_enable = 1;
1709
1710 if (pdev->frame_format == wlan_frm_fmt_802_3) {
Yun Parkeaea8632017-04-09 09:53:45 -07001711 /*
1712 * sw llc process is only needed in
1713 * 802.3 to 802.11 transform case
1714 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001715 pdev->sw_tx_llc_proc_enable = 1;
1716 pdev->sw_rx_llc_proc_enable = 1;
1717 } else {
1718 pdev->sw_tx_llc_proc_enable = 0;
1719 pdev->sw_rx_llc_proc_enable = 0;
1720 }
1721
1722 switch (pdev->frame_format) {
1723 case wlan_frm_fmt_raw:
1724 pdev->sw_tx_encap =
1725 pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
1726 ? 0 : 1;
1727 pdev->sw_rx_decap =
1728 pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
1729 ? 0 : 1;
1730 break;
1731 case wlan_frm_fmt_native_wifi:
1732 pdev->sw_tx_encap =
1733 pdev->
1734 target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
1735 ? 0 : 1;
1736 pdev->sw_rx_decap =
1737 pdev->
1738 target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
1739 ? 0 : 1;
1740 break;
1741 case wlan_frm_fmt_802_3:
1742 pdev->sw_tx_encap =
1743 pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
1744 ? 0 : 1;
1745 pdev->sw_rx_decap =
1746 pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
1747 ? 0 : 1;
1748 break;
1749 default:
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301750 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001751 "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
1752 pdev->frame_format,
1753 pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
Yun Parkf01f6e22017-01-18 17:27:02 -08001754 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001755 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001756 }
1757#endif
1758
1759 /*
1760 * Determine what rx processing steps are done within the host.
1761 * Possibilities:
1762 * 1. Nothing - rx->tx forwarding and rx PN entirely within target.
1763 * (This is unlikely; even if the target is doing rx->tx forwarding,
1764 * the host should be doing rx->tx forwarding too, as a backup for
1765 * the target's rx->tx forwarding, in case the target runs short on
1766 * memory, and can't store rx->tx frames that are waiting for
1767 * missing prior rx frames to arrive.)
1768 * 2. Just rx -> tx forwarding.
1769 * This is the typical configuration for HL, and a likely
1770 * configuration for LL STA or small APs (e.g. retail APs).
1771 * 3. Both PN check and rx -> tx forwarding.
1772 * This is the typical configuration for large LL APs.
1773 * Host-side PN check without rx->tx forwarding is not a valid
1774 * configuration, since the PN check needs to be done prior to
1775 * the rx->tx forwarding.
1776 */
1777 if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
Yun Parkeaea8632017-04-09 09:53:45 -07001778 /*
1779 * PN check, rx-tx forwarding and rx reorder is done by
1780 * the target
1781 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001782 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
1783 pdev->rx_opt_proc = ol_rx_in_order_deliver;
1784 else
1785 pdev->rx_opt_proc = ol_rx_fwd_check;
1786 } else {
1787 if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
1788 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
1789 /*
1790 * PN check done on host,
1791 * rx->tx forwarding not done at all.
1792 */
1793 pdev->rx_opt_proc = ol_rx_pn_check_only;
1794 } else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1795 /*
1796 * Both PN check and rx->tx forwarding done
1797 * on host.
1798 */
1799 pdev->rx_opt_proc = ol_rx_pn_check;
1800 } else {
1801#define TRACESTR01 "invalid config: if rx PN check is on the host,"\
1802"rx->tx forwarding check needs to also be on the host"
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301803 QDF_TRACE(QDF_MODULE_ID_TXRX,
1804 QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001805 "%s: %s", __func__, TRACESTR01);
1806#undef TRACESTR01
Yun Parkf01f6e22017-01-18 17:27:02 -08001807 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001808 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001809 }
1810 } else {
1811 /* PN check done on target */
1812 if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
1813 ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1814 /*
1815 * rx->tx forwarding done on host (possibly as
1816 * back-up for target-side primary rx->tx
1817 * forwarding)
1818 */
1819 pdev->rx_opt_proc = ol_rx_fwd_check;
1820 } else {
Yun Parkeaea8632017-04-09 09:53:45 -07001821 /*
1822 * rx->tx forwarding either done in target,
1823 * or not done at all
1824 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001825 pdev->rx_opt_proc = ol_rx_deliver;
1826 }
1827 }
1828 }
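 /*
  * Summary of the rx_opt_proc selection above (a reading aid, not new
  * behavior):
  *   full reorder offload, rx->tx fwd disabled -> ol_rx_in_order_deliver
  *   full reorder offload, rx->tx fwd enabled  -> ol_rx_fwd_check
  *   host PN check, fwd disabled               -> ol_rx_pn_check_only
  *   host PN check, host fwd check             -> ol_rx_pn_check
  *   target PN check, host fwd check           -> ol_rx_fwd_check
  *   otherwise                                 -> ol_rx_deliver
  */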
1829
1830 /* initialize mutexes for tx desc alloc and peer lookup */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301831 qdf_spinlock_create(&pdev->tx_mutex);
1832 qdf_spinlock_create(&pdev->peer_ref_mutex);
1833 qdf_spinlock_create(&pdev->rx.mutex);
1834 qdf_spinlock_create(&pdev->last_real_peer_mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001835 qdf_spinlock_create(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001836 OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
1837
Yun Parkf01f6e22017-01-18 17:27:02 -08001838 if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK) {
1839 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001840 goto reorder_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001841 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001842
Yun Parkf01f6e22017-01-18 17:27:02 -08001843 if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK) {
1844 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001845 goto pn_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001846 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001847
1848#ifdef PERE_IP_HDR_ALIGNMENT_WAR
1849 pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
1850#endif
1851
1852 /*
1853 * WDI event attach
1854 */
1855 wdi_event_attach(pdev);
1856
1857 /*
1858 * Initialize rx PN check characteristics for different security types.
1859 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301860 qdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001861
1862 /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
1863 pdev->rx_pn[htt_sec_type_tkip].len =
1864 pdev->rx_pn[htt_sec_type_tkip_nomic].len =
1865 pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
1866 pdev->rx_pn[htt_sec_type_tkip].cmp =
1867 pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
1868 pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
1869
1870 /* WAPI: 128-bit PN */
1871 pdev->rx_pn[htt_sec_type_wapi].len = 128;
1872 pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
1873
1874 OL_RX_REORDER_TIMEOUT_INIT(pdev);
1875
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001876 ol_txrx_dbg("Created pdev %pK\n", pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001877
1878 pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
1879
1880#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
1881#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
1882
1883/* #if 1 -- TODO: clean this up */
1884#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
1885 /* avg = 100% * new + 0% * old */ \
1886 (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
1887/*
Yun Parkeaea8632017-04-09 09:53:45 -07001888 * #else
1889 * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
1890 * //avg = 25% * new + 75% * old
1891 * (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
1892 * #endif
1893 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001894 pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
1895 pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
1896#endif
1897
1898 ol_txrx_local_peer_id_pool_init(pdev);
1899
1900 pdev->cfg.ll_pause_txq_limit =
1901 ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
1902
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301903 /* TX flow control for peer who is in very bad link status */
1904 ol_tx_badpeer_flow_cl_init(pdev);
1905
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001906#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301907 qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301908 qdf_spinlock_create(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001909
1910 /* initialize compute interval with 5 seconds (ESE default) */
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301911 pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001912 {
1913 uint32_t bin_width_1000ticks;
Yun Parkeaea8632017-04-09 09:53:45 -07001914
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001915 bin_width_1000ticks =
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301916 qdf_system_msecs_to_ticks
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001917 (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
1918 * 1000);
1919 /*
1920 * Compute a factor and shift that together are equal to the
1921 * inverse of the bin_width time, so that rather than dividing
1922 * by the bin width time, approximately the same result can be
1923 * obtained much more efficiently by a multiply + shift.
1924 * multiply_factor >> shift = 1 / bin_width_time, so
1925 * multiply_factor = (1 << shift) / bin_width_time.
1926 *
1927 * Pick the shift semi-arbitrarily.
1928 * If we knew statically what the bin_width would be, we could
1929 * choose a shift that minimizes the error.
1930 * Since the bin_width is determined dynamically, simply use a
1931 * shift that is about half of the uint32_t size. This should
1932 * result in a relatively large multiplier value, which
1933 * minimizes error from rounding the multiplier to an integer.
1934 * The rounding error only becomes significant if the tick units
1935 * are on the order of 1 microsecond. In most systems, it is
1936 * expected that the tick units will be relatively low-res,
1937 * on the order of 1 millisecond. In such systems the rounding
1938 * error is negligible.
1939 * It would be more accurate to dynamically try out different
1940 * shifts and choose the one that results in the smallest
1941 * rounding error, but that extra level of fidelity is
1942 * not needed.
1943 */
1944 pdev->tx_delay.hist_internal_bin_width_shift = 16;
1945 pdev->tx_delay.hist_internal_bin_width_mult =
1946 ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
1947 1000 + (bin_width_1000ticks >> 1)) /
1948 bin_width_1000ticks;
1949 }
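 /*
  * Worked example (assuming 1 ms ticks and a 10 ms internal bin width):
  * bin_width_1000ticks = 10000, so with a shift of 16 the multiplier
  * becomes ((1 << 16) * 1000 + 5000) / 10000 = 6554.  A delay of
  * delta ticks then maps to bin (delta * 6554) >> 16, i.e. roughly
  * delta / 10, which is the intended divide-by-bin-width.
  */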
1950#endif /* QCA_COMPUTE_TX_DELAY */
1951
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001952 /* Thermal Mitigation */
1953 ol_tx_throttle_init(pdev);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001954
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001955 ol_tso_seg_list_init(pdev, desc_pool_size);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001956
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301957 ol_tso_num_seg_list_init(pdev, desc_pool_size);
1958
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001959 ol_tx_register_flow_control(pdev);
1960
1961 return 0; /* success */
1962
Leo Chang376398b2015-10-23 14:19:02 -07001963pn_trace_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001964 OL_RX_REORDER_TRACE_DETACH(pdev);
1965
Leo Chang376398b2015-10-23 14:19:02 -07001966reorder_trace_attach_fail:
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301967 qdf_spinlock_destroy(&pdev->tx_mutex);
1968 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1969 qdf_spinlock_destroy(&pdev->rx.mutex);
1970 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301971 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001972 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1973
Leo Chang376398b2015-10-23 14:19:02 -07001974control_init_fail:
1975desc_alloc_fail:
1976 for (i = 0; i < fail_idx; i++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001977 htt_tx_desc_free(pdev->htt_pdev,
Leo Chang376398b2015-10-23 14:19:02 -07001978 (ol_tx_desc_find(pdev, i))->htt_tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001979
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301980 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001981 &pdev->tx_desc.desc_pages, 0, true);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001982
Leo Chang376398b2015-10-23 14:19:02 -07001983page_alloc_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001984 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1985 htt_ipa_uc_detach(pdev->htt_pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001986uc_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001987 htt_detach(pdev->htt_pdev);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301988htt_attach_fail:
1989 ol_tx_desc_dup_detect_deinit(pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001990ol_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001991 return ret; /* fail */
1992}
1993
Dhanashri Atre12a08392016-02-17 13:10:34 -08001994/**
1995 * ol_txrx_pdev_attach_target() - send target configuration
1996 *
1997 * @pdev - the physical device being initialized
1998 *
1999 * The majority of the data SW setup is done by the pdev_attach
2000 * functions, but this function completes the data SW setup by
2001 * sending datapath configuration messages to the target.
2002 *
2003 * Return: 0 on success, 1 on failure
2004 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002005static A_STATUS ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002006{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002007 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002008
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +05302009 return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0 : 1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002010}
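/*
 * For reference (as already described in the comments above, not an
 * extra requirement introduced here): the expected pdev bring-up order
 * is ol_txrx_pdev_attach() to allocate the pdev,
 * ol_txrx_pdev_post_attach() to allocate tx descriptors and finish the
 * host-side setup, and finally ol_txrx_pdev_attach_target() to push the
 * datapath configuration to the target.
 */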
2011
Dhanashri Atre12a08392016-02-17 13:10:34 -08002012/**
Mohit Khanna54f3a382017-03-13 17:56:32 -07002013 * ol_tx_free_descs_inuse - free tx descriptors which are in use
2014 * @pdev - the physical device for which tx descs need to be freed
2015 *
2016 * Cycle through the list of TX descriptors (for a pdev) which are in use,
2017 * for which TX completion has not been received and free them. Should be
2018 * called only when the interrupts are off and all lower layer RX is stopped.
2019 * Otherwise there may be a race condition with TX completions.
2020 *
2021 * Return: None
2022 */
2023static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
2024{
2025 int i;
2026 void *htt_tx_desc;
2027 struct ol_tx_desc_t *tx_desc;
2028 int num_freed_tx_desc = 0;
2029
2030 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2031 tx_desc = ol_tx_desc_find(pdev, i);
2032 /*
2033 * Confirm that each tx descriptor is "empty", i.e. it has
2034 * no tx frame attached.
2035 * In particular, check that there are no frames that have
2036 * been given to the target to transmit, for which the
2037 * target has never provided a response.
2038 */
2039 if (qdf_atomic_read(&tx_desc->ref_cnt)) {
2040 ol_txrx_dbg("Warning: freeing tx frame (no compltn)");
2041 ol_tx_desc_frame_free_nonstd(pdev,
2042 tx_desc, 1);
2043 num_freed_tx_desc++;
2044 }
2045 htt_tx_desc = tx_desc->htt_tx_desc;
2046 htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
2047 }
2048
2049 if (num_freed_tx_desc)
2050 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2051 "freed %d tx frames for which no resp from target",
2052 num_freed_tx_desc);
2053
2054}
2055
2056/**
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302057 * ol_txrx_pdev_pre_detach() - detach the data SW state
Dhanashri Atre12a08392016-02-17 13:10:34 -08002058 * @pdev - the data physical device object being removed
2059 * @force - delete the pdev (and its vdevs and peers) even if
2060 * there are outstanding references by the target to the vdevs
2061 * and peers within the pdev
2062 *
2063 * This function is used when the WLAN driver is being removed to
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302064 * detach the host data component within the driver.
Dhanashri Atre12a08392016-02-17 13:10:34 -08002065 *
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302066 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08002067 */
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302068static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002069{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002070 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07002071
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002072 /* preconditions */
2073 TXRX_ASSERT2(pdev);
2074
2075 /* check that the pdev has no vdevs allocated */
2076 TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
2077
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002078#ifdef QCA_SUPPORT_TX_THROTTLE
2079 /* Thermal Mitigation */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302080 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
2081 qdf_timer_free(&pdev->tx_throttle.phase_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002082#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302083 qdf_timer_stop(&pdev->tx_throttle.tx_timer);
2084 qdf_timer_free(&pdev->tx_throttle.tx_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002085#endif
2086#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002087
2088 if (force) {
2089 /*
2090 * The assertion above confirms that all vdevs within this pdev
2091 * were detached. However, they may not have actually been
2092 * deleted.
2093 * If the vdev had peers which never received a PEER_UNMAP msg
2094 * from the target, then there are still zombie peer objects,
2095 * and the vdev parents of the zombie peers are also zombies,
2096 * hanging around until their final peer gets deleted.
2097 * Go through the peer hash table and delete any peers left.
2098 * As a side effect, this will complete the deletion of any
2099 * vdevs that are waiting for their peers to finish deletion.
2100 */
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002101 ol_txrx_dbg("Force delete for pdev %pK\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002102 pdev);
2103 ol_txrx_peer_find_hash_erase(pdev);
2104 }
2105
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05302106 /* to get flow pool status before freeing descs */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07002107 ol_tx_dump_flow_pool_info((void *)pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07002108 ol_tx_free_descs_inuse(pdev);
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05302109 ol_tx_deregister_flow_control(pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07002110
2111 /*
2112 * ol_tso_seg_list_deinit should happen after
2113 * ol_tx_free_descs_inuse, since the latter accesses the tso seg
2114 * freelist that is de-initialized by ol_tso_seg_list_deinit
2115 */
2116 ol_tso_seg_list_deinit(pdev);
2117 ol_tso_num_seg_list_deinit(pdev);
2118
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05302119 /* Stop the communication between HTT and target at first */
2120 htt_detach_target(pdev->htt_pdev);
2121
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302122 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07002123 &pdev->tx_desc.desc_pages, 0, true);
2124 pdev->tx_desc.freelist = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002125
2126 /* Detach micro controller data path offload resource */
2127 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
2128 htt_ipa_uc_detach(pdev->htt_pdev);
2129
2130 htt_detach(pdev->htt_pdev);
Nirav Shah76291962016-04-25 10:50:37 +05302131 ol_tx_desc_dup_detect_deinit(pdev);
2132
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302133 qdf_spinlock_destroy(&pdev->tx_mutex);
2134 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
2135 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
2136 qdf_spinlock_destroy(&pdev->rx.mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07002137 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002138#ifdef QCA_SUPPORT_TX_THROTTLE
2139 /* Thermal Mitigation */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302140 qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002141#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302142
2143 /* TX flow control for peer who is in very bad link status */
2144 ol_tx_badpeer_flow_cl_deinit(pdev);
2145
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002146 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
2147
2148 OL_RX_REORDER_TRACE_DETACH(pdev);
2149 OL_RX_PN_TRACE_DETACH(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302150
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002151 /*
2152 * WDI event detach
2153 */
2154 wdi_event_detach(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302155
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002156 ol_txrx_local_peer_id_cleanup(pdev);
2157
2158#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302159 qdf_spinlock_destroy(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002160#endif
2161}
2162
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302163/**
2164 * ol_txrx_pdev_detach() - delete the data SW state
2165 * @ppdev - the data physical device object being removed
2166 * @force - delete the pdev (and its vdevs and peers) even if
2167 * there are outstanding references by the target to the vdevs
2168 * and peers within the pdev
2169 *
2170 * This function is used when the WLAN driver is being removed to
2171 * remove the host data component within the driver.
2172 * All virtual devices within the physical device need to be deleted
2173 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
2174 *
2175 * Return: None
2176 */
2177static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
2178{
2179 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Rakesh Pillai33942c42018-05-09 11:45:38 +05302180 struct ol_txrx_stats_req_internal *req, *temp_req;
tfyu9fcabd72017-09-26 17:46:48 +08002181 int i = 0;
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302182
2183 /* checking to ensure txrx pdev structure is not NULL */
2184 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302185 ol_txrx_err(
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302186 "NULL pdev passed to %s\n", __func__);
2187 return;
2188 }
2189
2190 htt_pktlogmod_exit(pdev);
2191
tfyu9fcabd72017-09-26 17:46:48 +08002192 qdf_spin_lock_bh(&pdev->req_list_spinlock);
2193 if (pdev->req_list_depth > 0)
2194 ol_txrx_err(
2195 "Warning: the txrx req list is not empty, depth=%d\n",
2196 pdev->req_list_depth
2197 );
Rakesh Pillai33942c42018-05-09 11:45:38 +05302198 TAILQ_FOREACH_SAFE(req, &pdev->req_list, req_list_elem, temp_req) {
tfyu9fcabd72017-09-26 17:46:48 +08002199 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
2200 pdev->req_list_depth--;
2201 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05302202 "%d: %pK, verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)\n",
tfyu9fcabd72017-09-26 17:46:48 +08002203 i++,
2204 req,
2205 req->base.print.verbose,
2206 req->base.print.concise,
2207 req->base.stats_type_upload_mask,
2208 req->base.stats_type_reset_mask
2209 );
2210 qdf_mem_free(req);
2211 }
2212 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
2213
2214 qdf_spinlock_destroy(&pdev->req_list_spinlock);
2215
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302216 OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
2217
2218 if (pdev->cfg.is_high_latency)
2219 ol_tx_sched_detach(pdev);
2220
2221 htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
2222
2223 htt_pdev_free(pdev->htt_pdev);
2224 ol_txrx_peer_find_detach(pdev);
2225 ol_txrx_tso_stats_deinit(pdev);
2226
2227 ol_txrx_pdev_txq_log_destroy(pdev);
2228 ol_txrx_pdev_grp_stat_destroy(pdev);
Alok Kumarddd457e2018-04-09 13:51:42 +05302229
Rakshith Suresh Patkar44f6a8f2018-04-17 16:17:12 +05302230 ol_txrx_debugfs_exit(pdev);
2231
Alok Kumarddd457e2018-04-09 13:51:42 +05302232 qdf_mem_free(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302233}
2234
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302235#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
2236
2237/**
2238 * ol_txrx_vdev_tx_desc_cnt_init() - initialise tx descriptor count for vdev
2239 * @vdev: the virtual device object
2240 *
2241 * Return: None
2242 */
2243static inline void
2244ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
2245{
2246 qdf_atomic_init(&vdev->tx_desc_count);
2247}
2248#else
2249
2250static inline void
2251ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
2252{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302253}
2254#endif
2255
Dhanashri Atre12a08392016-02-17 13:10:34 -08002256/**
2257 * ol_txrx_vdev_attach - Allocate and initialize the data object
2258 * for a new virtual device.
2259 *
2260 * @data_pdev - the physical device the virtual device belongs to
2261 * @vdev_mac_addr - the MAC address of the virtual device
2262 * @vdev_id - the ID used to identify the virtual device to the target
2263 * @op_mode - whether this virtual device is operating as an AP,
2264 * an IBSS, or a STA
2265 *
2266 * Return: success: handle to new data vdev object, failure: NULL
2267 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002268static struct cdp_vdev *
2269ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002270 uint8_t *vdev_mac_addr,
2271 uint8_t vdev_id, enum wlan_op_mode op_mode)
2272{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002273 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002274 struct ol_txrx_vdev_t *vdev;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002275 QDF_STATUS qdf_status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002276
2277 /* preconditions */
2278 TXRX_ASSERT2(pdev);
2279 TXRX_ASSERT2(vdev_mac_addr);
2280
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302281 vdev = qdf_mem_malloc(sizeof(*vdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002282 if (!vdev)
2283 return NULL; /* failure */
2284
2285 /* store provided params */
2286 vdev->pdev = pdev;
2287 vdev->vdev_id = vdev_id;
2288 vdev->opmode = op_mode;
2289
2290 vdev->delete.pending = 0;
2291 vdev->safemode = 0;
2292 vdev->drop_unenc = 1;
2293 vdev->num_filters = 0;
Himanshu Agarwal5ac2f7b2016-05-06 20:08:10 +05302294 vdev->fwd_tx_packets = 0;
2295 vdev->fwd_rx_packets = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002296
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302297 ol_txrx_vdev_tx_desc_cnt_init(vdev);
2298
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302299 qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002300 OL_TXRX_MAC_ADDR_LEN);
2301
2302 TAILQ_INIT(&vdev->peer_list);
2303 vdev->last_real_peer = NULL;
2304
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002305 ol_txrx_hl_tdls_flag_reset((struct cdp_vdev *)vdev, false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302306
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002307#ifdef QCA_IBSS_SUPPORT
2308 vdev->ibss_peer_num = 0;
2309 vdev->ibss_peer_heart_beat_timer = 0;
2310#endif
2311
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302312 ol_txrx_vdev_txqs_init(vdev);
2313
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302314 qdf_spinlock_create(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002315 vdev->ll_pause.paused_reason = 0;
2316 vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
2317 vdev->ll_pause.txq.depth = 0;
wadesong5e2e8012017-08-21 16:56:03 +08002318 qdf_atomic_init(&vdev->delete.detaching);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302319 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002320 &vdev->ll_pause.timer,
2321 ol_tx_vdev_ll_pause_queue_send, vdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302322 QDF_TIMER_TYPE_SW);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302323 qdf_atomic_init(&vdev->os_q_paused);
2324 qdf_atomic_set(&vdev->os_q_paused, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002325 vdev->tx_fl_lwm = 0;
2326 vdev->tx_fl_hwm = 0;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002327 vdev->rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002328 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Abhishek Singh217d9782017-04-28 23:49:11 +05302329 qdf_mem_zero(&vdev->last_peer_mac_addr,
2330 sizeof(union ol_txrx_align_mac_addr_t));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302331 qdf_spinlock_create(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002332 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08002333 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002334 vdev->osif_fc_ctx = NULL;
2335
Alok Kumar75355aa2018-03-19 17:32:58 +05302336 vdev->txrx_stats.txack_success = 0;
2337 vdev->txrx_stats.txack_failed = 0;
2338
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002339 /* Default MAX Q depth for every VDEV */
2340 vdev->ll_pause.max_q_depth =
2341 ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002342 qdf_status = qdf_event_create(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002343 /* add this vdev into the pdev's list */
2344 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
2345
Poddar, Siddarth14521792017-03-14 21:19:42 +05302346 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002347 "Created vdev %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002348 vdev,
2349 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2350 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2351 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2352
2353 /*
2354 * We've verified that htt_op_mode == wlan_op_mode,
2355 * so no translation is needed.
2356 */
2357 htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
2358
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002359 return (struct cdp_vdev *)vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002360}
2361
Dhanashri Atre12a08392016-02-17 13:10:34 -08002362/**
2363 *ol_txrx_vdev_register - Link a vdev's data object with the
2364 * matching OS shim vdev object.
2365 *
2366 * @txrx_vdev: the virtual device's data object
2367 * @osif_vdev: the virtual device's OS shim object
2368 * @txrx_ops: (pointers to) functions used for tx and rx data xfer
2369 *
2370 * The data object for a virtual device is created by the
2371 * function ol_txrx_vdev_attach. However, rather than fully
2372 * linking the data vdev object with the vdev objects from the
2373 * other subsystems that the data vdev object interacts with,
2374 * the txrx_vdev_attach function focuses primarily on creating
2375 * the data vdev object. After the creation of both the data
2376 * vdev object and the OS shim vdev object, this
2377 * txrx_osif_vdev_attach function is used to connect the two
2378 * vdev objects, so the data SW can use the OS shim vdev handle
2379 * when passing rx data received by a vdev up to the OS shim.
2380 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002381static void ol_txrx_vdev_register(struct cdp_vdev *pvdev,
2382 void *osif_vdev,
2383 struct ol_txrx_ops *txrx_ops)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002384{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002385 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002386
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002387 if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
2388 qdf_print("%s: vdev/txrx_ops is NULL!\n", __func__);
2389 qdf_assert(0);
2390 return;
2391 }
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002392
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002393 vdev->osif_dev = osif_vdev;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002394 vdev->rx = txrx_ops->rx.rx;
Poddar, Siddarth3906e172018-01-09 11:24:58 +05302395 vdev->stats_rx = txrx_ops->rx.stats_rx;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002396 txrx_ops->tx.tx = ol_tx_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002397}
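/*
 * Illustrative usage from an OS shim, based on the doc comment above;
 * my_shim_rx and my_shim_ctx are hypothetical placeholders, not driver
 * APIs:
 *
 *   struct ol_txrx_ops ops = { 0 };
 *   ops.rx.rx = my_shim_rx;               // rx delivery callback
 *   ol_txrx_vdev_register(vdev, my_shim_ctx, &ops);
 *   // after this call the shim transmits through ops.tx.tx, which the
 *   // function above points at ol_tx_data
 */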
2398
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002399#ifdef currently_unused
Dhanashri Atre12a08392016-02-17 13:10:34 -08002400/**
2401 * ol_txrx_set_curchan - Setup the current operating channel of
2402 * the device
2403 * @pdev - the data physical device object
2404 * @chan_mhz - the channel frequency (mhz) packets on
2405 *
2406 * Mainly used when populating monitor mode status that requires
2407 * the current operating channel
2408 *
2409 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002410void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz)
2411{
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002412}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002413#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002414
2415void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
2416{
2417 vdev->safemode = val;
2418}
2419
Dhanashri Atre12a08392016-02-17 13:10:34 -08002420/**
2421 * ol_txrx_set_privacy_filters - set the privacy filter
2422 * @vdev - the data virtual device object
2423 * @filter - filters to be set
2424 * @num - the number of filters
2425 *
2426 * Rx related. Set the privacy filters. When rx packets, check
2427 * the ether type, filter type and packet type to decide whether
2428 * discard these packets.
2429 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002430static void
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002431ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
2432 void *filters, uint32_t num)
2433{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302434 qdf_mem_copy(vdev->privacy_filters, filters,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002435 num * sizeof(struct privacy_exemption));
2436 vdev->num_filters = num;
2437}
2438
2439void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
2440{
2441 vdev->drop_unenc = val;
2442}
2443
gbian016a42e2017-03-01 18:49:11 +08002444#if defined(CONFIG_HL_SUPPORT)
2445
2446static void
2447ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2448{
2449 struct ol_txrx_pdev_t *pdev = vdev->pdev;
2450 int i;
2451 struct ol_tx_desc_t *tx_desc;
2452
2453 qdf_spin_lock_bh(&pdev->tx_mutex);
2454 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2455 tx_desc = ol_tx_desc_find(pdev, i);
2456 if (tx_desc->vdev == vdev)
2457 tx_desc->vdev = NULL;
2458 }
2459 qdf_spin_unlock_bh(&pdev->tx_mutex);
2460}
2461
2462#else
2463
2464static void
2465ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2466{
2467
2468}
2469
2470#endif
2471
Dhanashri Atre12a08392016-02-17 13:10:34 -08002472/**
2473 * ol_txrx_vdev_detach - Deallocate the specified data virtual
2474 * device object.
2475 * @data_vdev: data object for the virtual device in question
2476 * @callback: function to call (if non-NULL) once the vdev has
2477 * been wholly deleted
2478 * @callback_context: context to provide in the callback
2479 *
2480 * All peers associated with the virtual device need to be deleted
2481 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
2482 * However, for the peers to be fully deleted, the peer deletion has to
2483 * percolate through the target data FW and back up to the host data SW.
2484 * Thus, even though the host control SW may have issued a peer_detach
2485 * call for each of the vdev's peers, the peer objects may still be
2486 * allocated, pending removal of all references to them by the target FW.
2487 * In this case, though the vdev_detach function call will still return
2488 * immediately, the vdev itself won't actually be deleted, until the
2489 * deletions of all its peers complete.
2490 * The caller can provide a callback function pointer to be notified when
2491 * the vdev deletion actually happens - whether it's directly within the
2492 * vdev_detach call, or if it's deferred until all in-progress peer
2493 * deletions have completed.
2494 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002495static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002496ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002497 ol_txrx_vdev_delete_cb callback, void *context)
2498{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002499 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
wadesong5e2e8012017-08-21 16:56:03 +08002500 struct ol_txrx_pdev_t *pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002501
2502 /* preconditions */
2503 TXRX_ASSERT2(vdev);
wadesong5e2e8012017-08-21 16:56:03 +08002504 pdev = vdev->pdev;
2505
2506 /* prevent anyone from restarting the ll_pause timer again */
2507 qdf_atomic_set(&vdev->delete.detaching, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002508
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302509 ol_txrx_vdev_tx_queue_free(vdev);
2510
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302511 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302512 qdf_timer_stop(&vdev->ll_pause.timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002513 vdev->ll_pause.is_q_timer_on = false;
2514 while (vdev->ll_pause.txq.head) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302515 qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
Yun Parkeaea8632017-04-09 09:53:45 -07002516
Nirav Shahcbc6d722016-03-01 16:24:53 +05302517 qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
Nirav Shahcbc6d722016-03-01 16:24:53 +05302518 qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002519 vdev->ll_pause.txq.head = next;
2520 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302521 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
wadesong5e2e8012017-08-21 16:56:03 +08002522
2523 /* ll_pause timer should be deleted without any locks held, and
2524 * no timer function should be executed after this point because
2525 * qdf_timer_free is deleting the timer synchronously.
2526 */
2527 qdf_timer_free(&vdev->ll_pause.timer);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302528 qdf_spinlock_destroy(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002529
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302530 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002531 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08002532 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002533 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302534 qdf_spin_unlock_bh(&vdev->flow_control_lock);
2535 qdf_spinlock_destroy(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002536
2537 /* remove the vdev from its parent pdev's list */
2538 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
2539
2540 /*
2541 * Use peer_ref_mutex while accessing peer_list, in case
2542 * a peer is in the process of being removed from the list.
2543 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302544 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002545 /* check that the vdev has no peers allocated */
2546 if (!TAILQ_EMPTY(&vdev->peer_list)) {
2547 /* debug print - will be removed later */
Poddar, Siddarth14521792017-03-14 21:19:42 +05302548 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002549 "%s: not deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002550 __func__, vdev,
2551 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2552 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2553 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2554 /* indicate that the vdev needs to be deleted */
2555 vdev->delete.pending = 1;
2556 vdev->delete.callback = callback;
2557 vdev->delete.context = context;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302558 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002559 return;
2560 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302561 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002562 qdf_event_destroy(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002563
Poddar, Siddarth14521792017-03-14 21:19:42 +05302564 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002565 "%s: deleting vdev obj %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002566 __func__, vdev,
2567 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2568 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2569 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2570
2571 htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
2572
2573 /*
Yun Parkeaea8632017-04-09 09:53:45 -07002574 * ol_tx_desc_free might access stale vdev contents referenced by a
2575 * tx desc, since this vdev might be detached asynchronously in
2576 * another thread.
2577 *
2578 * Go through tx desc pool to set corresponding tx desc's vdev to NULL
2579 * when detach this vdev, and add vdev checking in the ol_tx_desc_free
2580 * to avoid crash.
2581 *
2582 */
gbian016a42e2017-03-01 18:49:11 +08002583 ol_txrx_tx_desc_reset_vdev(vdev);
2584
2585 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002586 * Doesn't matter if there are outstanding tx frames -
2587 * they will be freed once the target sends a tx completion
2588 * message for them.
2589 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302590 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002591 if (callback)
2592 callback(context);
2593}
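/*
 * Illustrative sketch (not part of the original driver): one way a
 * control-path caller could consume the deferred-delete contract of the
 * detach routine above.  When peers are still attached, the routine
 * returns without freeing the vdev and invokes the supplied callback
 * later, once the last peer reference drops; otherwise the callback runs
 * before the routine returns.  The guard macro, the example_* names and
 * the exact detach prototype shown here are assumptions for illustration.
 */
#ifdef WLAN_OL_TXRX_DOC_EXAMPLES
static void example_vdev_delete_done(void *context)
{
	qdf_event_t *done = context;

	/* signal the waiter that txrx finished tearing the vdev down */
	qdf_event_set(done);
}

static void example_detach_vdev(struct cdp_vdev *pvdev)
{
	qdf_event_t done;

	qdf_event_create(&done);
	/* may complete synchronously or later via example_vdev_delete_done */
	ol_txrx_vdev_detach(pvdev, example_vdev_delete_done, &done);
	qdf_wait_for_event_completion(&done, 2000 /* ms, arbitrary for the sketch */);
	qdf_event_destroy(&done);
}
#endif /* WLAN_OL_TXRX_DOC_EXAMPLES */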
2594
2595/**
2596 * ol_txrx_flush_rx_frames() - flush cached rx frames
2597 * @peer: peer
2598 * @drop: set flag to drop frames
2599 *
2600 * Return: None
2601 */
2602void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302603 bool drop)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002604{
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002605 struct ol_txrx_cached_bufq_t *bufqi;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002606 struct ol_rx_cached_buf *cache_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302607 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002608 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002609
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302610 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
2611 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002612 return;
2613 }
2614
Dhanashri Atre182b0272016-02-17 15:35:07 -08002615 qdf_assert(peer->vdev);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302616 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002617 bufqi = &peer->bufq_info;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002618
Dhanashri Atre50141c52016-04-07 13:15:29 -07002619 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
Dhanashri Atre182b0272016-02-17 15:35:07 -08002620 data_rx = peer->vdev->rx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002621 else
2622 drop = true;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302623 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002624
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002625 qdf_spin_lock_bh(&bufqi->bufq_lock);
2626 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002627 typeof(*cache_buf), list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002628 while (!list_empty(&bufqi->cached_bufq)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002629 list_del(&cache_buf->list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002630 bufqi->curr--;
2631 qdf_assert(bufqi->curr >= 0);
2632 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002633 if (drop) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302634 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002635 } else {
2636 /* Flush the cached frames to HDD */
Dhanashri Atre182b0272016-02-17 15:35:07 -08002637 ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302638 if (ret != QDF_STATUS_SUCCESS)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302639 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002640 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302641 qdf_mem_free(cache_buf);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002642 qdf_spin_lock_bh(&bufqi->bufq_lock);
2643 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002644 typeof(*cache_buf), list);
2645 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002646 bufqi->qdepth_no_thresh = bufqi->curr;
2647 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302648 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002649}
2650
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07002651static void ol_txrx_flush_cache_rx_queue(void)
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302652{
2653 uint8_t sta_id;
2654 struct ol_txrx_peer_t *peer;
2655 struct ol_txrx_pdev_t *pdev;
2656
2657 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
2658 if (!pdev)
2659 return;
2660
2661 for (sta_id = 0; sta_id < WLAN_MAX_STA_COUNT; sta_id++) {
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002662 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
2663 sta_id);
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302664 if (!peer)
2665 continue;
2666 ol_txrx_flush_rx_frames(peer, 1);
2667 }
2668}
2669
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302670/* Define short name to use in cds_trigger_recovery */
2671#define PEER_DEL_TIMEOUT QDF_PEER_DELETION_TIMEDOUT
2672
Dhanashri Atre12a08392016-02-17 13:10:34 -08002673/**
Naveen Rawat17c42a82018-02-01 19:18:27 -08002674 * ol_txrx_dump_peer_access_list() - dump peer access list
2675 * @peer: peer handle
2676 *
2677 * This function dumps any peer debug IDs that still hold references to the peer
2678 *
2679 * Return: None
2680 */
2681static void ol_txrx_dump_peer_access_list(ol_txrx_peer_handle peer)
2682{
2683 u32 i;
2684 u32 pending_ref;
2685
2686 for (i = 0; i < PEER_DEBUG_ID_MAX; i++) {
2687 pending_ref = qdf_atomic_read(&peer->access_list[i]);
2688 if (pending_ref)
2689 ol_txrx_info_high("id %d pending refs %d",
2690 i, pending_ref);
2691 }
2692}
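/*
 * The per-debug-id counters dumped above are maintained by the peer
 * reference helpers: taking a reference with a given debug id must be
 * balanced by a release with the same id, otherwise the leftover count
 * shows up here.  Illustrative pairing (sketch only; the guard macro and
 * example_* names are assumptions):
 */
#ifdef WLAN_OL_TXRX_DOC_EXAMPLES
static void example_peer_ref_pairing(struct ol_txrx_pdev_t *pdev,
				     uint8_t *peer_mac)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
						   PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer)
		return;

	/* ... use the peer ... */

	/* release with the same debug id that took the reference */
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
}
#endif /* WLAN_OL_TXRX_DOC_EXAMPLES */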
2693
2694/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08002695 * ol_txrx_peer_attach - Allocate and set up references for a
2696 * data peer object.
2697 * @pvdev: data virtual device object that will directly
2698 * own the data_peer object (its parent data physical
2699 * device, derived from the vdev, will indirectly own
2700 * the data_peer object)
2701 * @peer_mac_addr: MAC address of the new peer
2702 *
2703 * When an association with a peer starts, the host's control SW
2704 * uses this function to inform the host data SW.
2705 * The host data SW allocates its own peer object, and stores a
2706 * reference to the control peer object within the data peer object.
2707 * The host data SW also stores a reference to the virtual device
2708 * that the peer is associated with. This virtual device handle is
2709 * used when the data SW delivers rx data frames to the OS shim layer.
2710 * The host data SW returns a handle to the new peer data object,
2711 * so a reference within the control peer object can be set to the
2712 * data peer object.
2713 *
2714 * Return: handle to new data peer object, or NULL if the attach
2715 * fails
2716 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002717static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002718ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002719{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002720 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002721 struct ol_txrx_peer_t *peer;
2722 struct ol_txrx_peer_t *temp_peer;
2723 uint8_t i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002724 bool wait_on_deletion = false;
2725 unsigned long rc;
Dhanashri Atre12a08392016-02-17 13:10:34 -08002726 struct ol_txrx_pdev_t *pdev;
Abhishek Singh217d9782017-04-28 23:49:11 +05302727 bool cmp_wait_mac = false;
2728 uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002729
2730 /* preconditions */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002731 TXRX_ASSERT2(vdev);
2732 TXRX_ASSERT2(peer_mac_addr);
2733
Dhanashri Atre12a08392016-02-17 13:10:34 -08002734 pdev = vdev->pdev;
2735 TXRX_ASSERT2(pdev);
2736
Abhishek Singh217d9782017-04-28 23:49:11 +05302737 if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
2738 QDF_MAC_ADDR_SIZE))
2739 cmp_wait_mac = true;
2740
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302741 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002742 /* check for duplicate existing peer */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002743 TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2744 if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
2745 (union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302746 ol_txrx_info_high(
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002747 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exists.\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002748 vdev->vdev_id,
2749 peer_mac_addr[0], peer_mac_addr[1],
2750 peer_mac_addr[2], peer_mac_addr[3],
2751 peer_mac_addr[4], peer_mac_addr[5]);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302752 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002753 vdev->wait_on_peer_id = temp_peer->local_id;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002754 qdf_event_reset(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002755 wait_on_deletion = true;
Abhishek Singh217d9782017-04-28 23:49:11 +05302756 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002757 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302758 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002759 return NULL;
2760 }
2761 }
Abhishek Singh217d9782017-04-28 23:49:11 +05302762 if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
2763 &temp_peer->mac_addr,
2764 &vdev->last_peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302765 ol_txrx_info_high(
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002766 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) old peer exists.\n",
Abhishek Singh217d9782017-04-28 23:49:11 +05302767 vdev->vdev_id,
2768 vdev->last_peer_mac_addr.raw[0],
2769 vdev->last_peer_mac_addr.raw[1],
2770 vdev->last_peer_mac_addr.raw[2],
2771 vdev->last_peer_mac_addr.raw[3],
2772 vdev->last_peer_mac_addr.raw[4],
2773 vdev->last_peer_mac_addr.raw[5]);
2774 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2775 vdev->wait_on_peer_id = temp_peer->local_id;
2776 qdf_event_reset(&vdev->wait_delete_comp);
2777 wait_on_deletion = true;
2778 break;
2779 } else {
2780 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2781 ol_txrx_err("peer not found");
2782 return NULL;
2783 }
2784 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002785 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302786 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002787
Abhishek Singh217d9782017-04-28 23:49:11 +05302788 qdf_mem_zero(&vdev->last_peer_mac_addr,
2789 sizeof(union ol_txrx_align_mac_addr_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002790 if (wait_on_deletion) {
2791 /* wait for peer deletion */
Nachiket Kukade0396b732017-11-14 16:35:16 +05302792 rc = qdf_wait_for_event_completion(&vdev->wait_delete_comp,
Prakash Manjunathappad3ccca22016-05-05 19:23:19 -07002793 PEER_DELETION_TIMEOUT);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002794 if (QDF_STATUS_SUCCESS != rc) {
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002795 ol_txrx_err("error waiting for peer_id(%d) deletion, status %d\n",
Dustin Brown100201e2017-07-10 11:48:40 -07002796 vdev->wait_on_peer_id, (int) rc);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002797 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08002798 ol_txrx_dump_peer_access_list(temp_peer);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002799 wlan_roam_debug_dump_table();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002800 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Dustin Brown100201e2017-07-10 11:48:40 -07002801
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002802 return NULL;
2803 }
2804 }
2805
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302806 peer = qdf_mem_malloc(sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002807 if (!peer)
2808 return NULL; /* failure */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002809
2810 /* store provided params */
2811 peer->vdev = vdev;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302812 qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002813 OL_TXRX_MAC_ADDR_LEN);
2814
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302815 ol_txrx_peer_txqs_init(pdev, peer);
2816
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002817 INIT_LIST_HEAD(&peer->bufq_info.cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302818 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002819 /* add this peer into the vdev's list */
2820 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302821 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002822 /* check whether this is a real peer (peer mac addr != vdev mac addr) */
Frank Liu4362e462018-01-16 11:51:55 +08002823 if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr)) {
2824 qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002825 vdev->last_real_peer = peer;
Frank Liu4362e462018-01-16 11:51:55 +08002826 qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
2827 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002828
2829 peer->rx_opt_proc = pdev->rx_opt_proc;
2830
2831 ol_rx_peer_init(pdev, peer);
2832
2833 /* initialize the peer_id */
2834 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2835 peer->peer_ids[i] = HTT_INVALID_PEER;
2836
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302837 qdf_spinlock_create(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002838 qdf_spinlock_create(&peer->bufq_info.bufq_lock);
2839
2840 peer->bufq_info.thresh = OL_TXRX_CACHED_BUFQ_THRESH;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002841
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302842 qdf_atomic_init(&peer->delete_in_progress);
2843 qdf_atomic_init(&peer->flush_in_progress);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302844 qdf_atomic_init(&peer->ref_cnt);
Mohit Khannab7bec722017-11-10 11:43:44 -08002845
2846 for (i = 0; i < PEER_DEBUG_ID_MAX; i++)
2847 qdf_atomic_init(&peer->access_list[i]);
2848
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002849 /* keep one reference for attach */
Mohit Khannab7bec722017-11-10 11:43:44 -08002850 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002851
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002852 /* Set a flag to indicate peer create is pending in firmware */
Prakash Dhavali0d3f1d62016-11-20 23:48:24 -08002853 qdf_atomic_init(&peer->fw_create_pending);
2854 qdf_atomic_set(&peer->fw_create_pending, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002855
2856 peer->valid = 1;
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07002857 qdf_timer_init(pdev->osdev, &peer->peer_unmap_timer,
2858 peer_unmap_timer_handler, peer, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002859
2860 ol_txrx_peer_find_hash_add(pdev, peer);
2861
Mohit Khanna47384bc2016-08-15 15:37:05 -07002862 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002863 "vdev %pK created peer %pK ref_cnt %d (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Mohit Khanna47384bc2016-08-15 15:37:05 -07002864 vdev, peer, qdf_atomic_read(&peer->ref_cnt),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002865 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2866 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2867 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2868 /*
2869 * For every peer MAP message, check whether this is the bss peer and set bss_peer
2870 */
Ankit Guptaa5076012016-09-14 11:32:19 -07002871 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
2872 OL_TXRX_MAC_ADDR_LEN))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002873 peer->bss_peer = 1;
2874
2875 /*
2876 * The peer starts in the "disc" state while association is in progress.
2877 * Once association completes, the peer will get updated to "auth" state
2878 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2879 * or else to the "conn" state. For non-open mode, the peer will
2880 * progress to "auth" state once the authentication completes.
2881 */
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002882 peer->state = OL_TXRX_PEER_STATE_INVALID;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002883 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002884 OL_TXRX_PEER_STATE_DISC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002885
2886#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2887 peer->rssi_dbm = HTT_RSSI_INVALID;
2888#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002889 if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2890 !pdev->self_peer) {
2891 pdev->self_peer = peer;
2892 /*
2893 * No Tx in monitor mode, otherwise results in target assert.
2894 * Setting disable_intrabss_fwd to true
2895 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002896 ol_vdev_rx_set_intrabss_fwd((struct cdp_vdev *)vdev, true);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002897 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002898
2899 ol_txrx_local_peer_id_alloc(pdev, peer);
2900
Leo Chang98726762016-10-28 11:07:18 -07002901 return (void *)peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002902}
2903
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302904#undef PEER_DEL_TIMEOUT
2905
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002906/*
2907 * Discarding tx filter - removes all data frames (disconnected state)
2908 */
2909static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2910{
2911 return A_ERROR;
2912}
2913
2914/*
2915 * Non-authentication tx filter - filters out data frames that are not
2916 * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2917 * data frames (connected state)
2918 */
2919static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2920{
2921 return
2922 (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2923 tx_msdu_info->htt.info.ethertype ==
2924 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2925}
2926
2927/*
2928 * Pass-through tx filter - lets all data frames through (authenticated state)
2929 */
2930static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2931{
2932 return A_OK;
2933}
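/*
 * How the three filters above are used: ol_txrx_peer_state_update()
 * (further below) installs exactly one of them as peer->tx_filter,
 * based on the peer's authentication state.  Condensed mapping,
 * mirroring that function (illustrative sketch only; the guard macro
 * and example_* name are assumptions):
 */
#ifdef WLAN_OL_TXRX_DOC_EXAMPLES
static void example_install_tx_filter(struct ol_txrx_peer_t *peer,
				      enum ol_txrx_peer_state state)
{
	peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
		? ol_tx_filter_pass_thru	/* all data frames allowed */
		: ((state == OL_TXRX_PEER_STATE_CONN)
		   ? ol_tx_filter_non_auth	/* only EAPOL/WAPI allowed */
		   : ol_tx_filter_discard);	/* disconnected: drop all */
}
#endif /* WLAN_OL_TXRX_DOC_EXAMPLES */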
2934
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002935/**
2936 * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2937 * @peer: handle to peer
2938 *
2939 * Returns the MAC address for modules that do not know the peer type
2940 *
2941 * Return: the mac_addr from peer
2942 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002943static uint8_t *
Leo Chang98726762016-10-28 11:07:18 -07002944ol_txrx_peer_get_peer_mac_addr(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002945{
Leo Chang98726762016-10-28 11:07:18 -07002946 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002947
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002948 if (!peer)
2949 return NULL;
2950
2951 return peer->mac_addr.raw;
2952}
2953
Abhishek Singhcfb44482017-03-10 12:42:37 +05302954#ifdef WLAN_FEATURE_11W
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002955/**
2956 * ol_txrx_get_pn_info() - Returns pn info from peer
2957 * @peer: handle to peer
2958 * @last_pn_valid: return last_rmf_pn_valid value from peer.
2959 * @last_pn: return last_rmf_pn value from peer.
2960 * @rmf_pn_replays: return rmf_pn_replays value from peer.
2961 *
2962 * Return: NONE
2963 */
2964void
Leo Chang98726762016-10-28 11:07:18 -07002965ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002966 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2967{
Leo Chang98726762016-10-28 11:07:18 -07002968 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002969 *last_pn_valid = &peer->last_rmf_pn_valid;
2970 *last_pn = &peer->last_rmf_pn;
2971 *rmf_pn_replays = &peer->rmf_pn_replays;
2972}
Abhishek Singhcfb44482017-03-10 12:42:37 +05302973#else
2974void
2975ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
2976 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2977{
2978}
2979#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002980
2981/**
2982 * ol_txrx_get_opmode() - Return operation mode of vdev
2983 * @vdev: vdev handle
2984 *
2985 * Return: operation mode.
2986 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002987static int ol_txrx_get_opmode(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002988{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002989 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002990
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002991 return vdev->opmode;
2992}
2993
2994/**
2995 * ol_txrx_get_peer_state() - Return peer state of peer
2996 * @peer: peer handle
2997 *
2998 * Return: return peer state
2999 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003000static int ol_txrx_get_peer_state(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003001{
Leo Chang98726762016-10-28 11:07:18 -07003002 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003003
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003004 return peer->state;
3005}
3006
3007/**
3008 * ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
3009 * @peer: peer handle
3010 *
3011 * Return: vdev handle from peer
3012 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003013static struct cdp_vdev *ol_txrx_get_vdev_for_peer(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003014{
Leo Chang98726762016-10-28 11:07:18 -07003015 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003016
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003017 return (struct cdp_vdev *)peer->vdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003018}
3019
3020/**
3021 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
3022 * @vdev: vdev handle
3023 *
3024 * Return: vdev mac address
3025 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003026static uint8_t *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003027ol_txrx_get_vdev_mac_addr(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003028{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003029 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003030
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003031 if (!vdev)
3032 return NULL;
3033
3034 return vdev->mac_addr.raw;
3035}
3036
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003037#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003038/**
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003039 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003040 * vdev
3041 * @vdev: vdev handle
3042 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003043 * Return: Handle to struct qdf_mac_addr
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003044 */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003045struct qdf_mac_addr *
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003046ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
3047{
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003048 return (struct qdf_mac_addr *)&(vdev->mac_addr);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003049}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003050#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003051
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003052#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003053/**
3054 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
3055 * @vdev: vdev handle
3056 *
3057 * Return: Handle to pdev
3058 */
3059ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
3060{
3061 return vdev->pdev;
3062}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003063#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003064
3065/**
3066 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
3067 * @vdev: vdev handle
3068 *
3069 * Return: Handle to control pdev
3070 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003071static struct cdp_cfg *
3072ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003073{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003074 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003075
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003076 return vdev->pdev->ctrl_pdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003077}
3078
3079/**
3080 * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
3081 * @vdev: vdev handle
3082 *
3083 * Return: Rx Fwd disabled status
3084 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003085static uint8_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003086ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003087{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003088 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003089 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
3090 vdev->pdev->ctrl_pdev;
3091 return cfg->rx_fwd_disabled;
3092}
3093
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07003094#ifdef QCA_IBSS_SUPPORT
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003095/**
3096 * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
3097 * @vdev: vdev handle
3098 * @peer_num_delta: peer nums to be adjusted
3099 *
3100 * Return: -1 for failure or total peer nums after adjustment.
3101 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003102static int16_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003103ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003104 int16_t peer_num_delta)
3105{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003106 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003107 int16_t new_peer_num;
3108
3109 new_peer_num = vdev->ibss_peer_num + peer_num_delta;
Naveen Rawatc45d1622016-07-05 12:20:09 -07003110 if (new_peer_num > MAX_PEERS || new_peer_num < 0)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003111 return OL_TXRX_INVALID_NUM_PEERS;
3112
3113 vdev->ibss_peer_num = new_peer_num;
3114
3115 return new_peer_num;
3116}
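/*
 * Illustrative use of the IBSS peer accounting helper above: the caller
 * passes +1 when a peer joins and -1 when it leaves, and treats
 * OL_TXRX_INVALID_NUM_PEERS as an overflow/underflow error.  Sketch
 * only; the guard macro and example_* name are assumptions.
 */
#ifdef WLAN_OL_TXRX_DOC_EXAMPLES
static bool example_ibss_peer_joined(struct cdp_vdev *pvdev)
{
	int16_t num;

	num = ol_txrx_update_ibss_add_peer_num_of_vdev(pvdev, 1);
	return num != OL_TXRX_INVALID_NUM_PEERS;
}
#endif /* WLAN_OL_TXRX_DOC_EXAMPLES */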
3117
3118/**
3119 * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
3120 * beat timer
3121 * @vdev: vdev handle
3122 * @timer_value_sec: new heart beat timer value
3123 *
3124 * Return: Old timer value set in vdev.
3125 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003126static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
3127 uint16_t timer_value_sec)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003128{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003129 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003130 uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
3131
3132 vdev->ibss_peer_heart_beat_timer = timer_value_sec;
3133
3134 return old_timer_value;
3135}
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07003136#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003137
3138/**
3139 * ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
3140 * @vdev: vdev handle
3141 * @callback: callback function to remove the peer.
3142 * @callback_context: handle for callback function
3143 * @remove_last_peer: whether the last (self/bss) peer should also be removed
3144 *
3145 * Return: NONE
3146 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003147static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003148ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003149 ol_txrx_vdev_peer_remove_cb callback,
3150 void *callback_context, bool remove_last_peer)
3151{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003152 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003153 ol_txrx_peer_handle peer, temp;
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07003154 int self_removed = 0;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003155 /* remove all remote peers for vdev */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003156 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003157
3158 temp = NULL;
3159 TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
3160 peer_list_elem) {
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05303161 if (qdf_atomic_read(&peer->delete_in_progress))
3162 continue;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003163 if (temp) {
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003164 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05303165 callback(callback_context, temp->mac_addr.raw,
Jiachao Wu641760e2018-01-21 12:11:31 +08003166 vdev->vdev_id, temp);
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003167 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003168 }
3169 /* self peer is deleted last */
3170 if (peer == TAILQ_FIRST(&vdev->peer_list)) {
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07003171 self_removed = 1;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003172 break;
Yun Parkeaea8632017-04-09 09:53:45 -07003173 }
3174 temp = peer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003175 }
3176
Mohit Khanna137b97d2016-04-21 16:11:33 -07003177 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
3178
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07003179 if (self_removed)
3180 ol_txrx_info("%s: self peer removed by caller ",
3181 __func__);
3182
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003183 if (remove_last_peer) {
3184 /* remove IBSS bss peer last */
3185 peer = TAILQ_FIRST(&vdev->peer_list);
3186 callback(callback_context, (uint8_t *) &vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08003187 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003188 }
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003189}
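/*
 * Illustrative sketch of a peer-remove callback matching the calls made
 * above: it receives the caller's context, the peer MAC address, the
 * vdev id and the peer handle.  Sketch only; the guard macro and
 * example_* name are assumptions.
 */
#ifdef WLAN_OL_TXRX_DOC_EXAMPLES
static void example_peer_remove_cb(void *callback_context, uint8_t *peer_mac,
				   uint8_t vdev_id, ol_txrx_peer_handle peer)
{
	/* the control path would delete its own peer bookkeeping here */
}
#endif /* WLAN_OL_TXRX_DOC_EXAMPLES */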
3190
3191/**
3192 * ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
3193 * @vdev: vdev handle
3194 * @callback: callback function to remove the peer.
3195 * @callback_context: handle for callback function
3196 *
3197 * Return: NONE
3198 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003199static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003200ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003201 ol_txrx_vdev_peer_remove_cb callback,
3202 void *callback_context)
3203{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003204 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003205 ol_txrx_peer_handle peer = NULL;
Jiachao Wu641760e2018-01-21 12:11:31 +08003206 ol_txrx_peer_handle tmp_peer = NULL;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003207
Jiachao Wu641760e2018-01-21 12:11:31 +08003208 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, peer_list_elem, tmp_peer) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05303209 ol_txrx_info_high(
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003210 "%s: peer found for vdev id %d. deleting the peer",
3211 __func__, vdev->vdev_id);
3212 callback(callback_context, (uint8_t *)&vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08003213 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003214 }
3215}
3216
3217/**
3218 * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
3219 * @vdev: vdev handle
3220 * @ocb_set_chan: OCB channel information to be set in vdev.
3221 *
3222 * Return: NONE
3223 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003224static void ol_txrx_set_ocb_chan_info(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003225 struct ol_txrx_ocb_set_chan ocb_set_chan)
3226{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003227 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003228
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003229 vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
3230 vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
3231}
3232
3233/**
3234 * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
3235 * @vdev: vdev handle
3236 *
3237 * Return: handle to struct ol_txrx_ocb_chan_info
3238 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003239static struct ol_txrx_ocb_chan_info *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003240ol_txrx_get_ocb_chan_info(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003241{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003242 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003243
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003244 return vdev->ocb_channel_info;
3245}
3246
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003247/**
3248 * @brief specify the peer's authentication state
3249 * @details
3250 * Specify the peer's authentication state (none, connected, authenticated)
3251 * to allow the data SW to determine whether to filter out invalid data frames.
3252 * (In the "connected" state, where security is enabled, but authentication
3253 * has not completed, tx and rx data frames other than EAPOL or WAPI should
3254 * be discarded.)
3255 * This function is only relevant for systems in which the tx and rx filtering
3256 * are done in the host rather than in the target.
3257 *
3258 * @param data_peer - which peer has changed its state
3259 * @param state - the new state of the peer
3260 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003261 * Return: QDF Status
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003262 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003263QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003264 uint8_t *peer_mac,
3265 enum ol_txrx_peer_state state)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003266{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003267 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003268 struct ol_txrx_peer_t *peer;
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003269 int peer_ref_cnt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003270
Anurag Chouhanc5548422016-02-24 18:33:27 +05303271 if (qdf_unlikely(!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303272 ol_txrx_err("Pdev is NULL");
Anurag Chouhanc5548422016-02-24 18:33:27 +05303273 qdf_assert(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303274 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003275 }
3276
Mohit Khannab7bec722017-11-10 11:43:44 -08003277 peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
3278 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003279 if (NULL == peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303280 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303281 "%s: peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
3282 __func__,
3283 peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
3284 peer_mac[4], peer_mac[5]);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303285 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003286 }
3287
3288 /* TODO: Should we send WMI command of the connection state? */
3289 /* avoid multiple auth state change. */
3290 if (peer->state == state) {
3291#ifdef TXRX_PRINT_VERBOSE_ENABLE
Poddar, Siddarth14521792017-03-14 21:19:42 +05303292 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003293 "%s: no state change, returns directly\n",
3294 __func__);
3295#endif
Mohit Khannab7bec722017-11-10 11:43:44 -08003296 peer_ref_cnt = ol_txrx_peer_release_ref
3297 (peer,
3298 PEER_DEBUG_ID_OL_INTERNAL);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303299 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003300 }
3301
Poddar, Siddarth14521792017-03-14 21:19:42 +05303302 ol_txrx_dbg("%s: change from %d to %d\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003303 __func__, peer->state, state);
3304
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003305 peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003306 ? ol_tx_filter_pass_thru
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003307 : ((state == OL_TXRX_PEER_STATE_CONN)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003308 ? ol_tx_filter_non_auth
3309 : ol_tx_filter_discard);
3310
3311 if (peer->vdev->pdev->cfg.host_addba) {
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003312 if (state == OL_TXRX_PEER_STATE_AUTH) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003313 int tid;
3314 /*
3315 * Pause all regular (non-extended) TID tx queues until
3316 * data arrives and ADDBA negotiation has completed.
3317 */
Poddar, Siddarth14521792017-03-14 21:19:42 +05303318 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003319 "%s: pause peer and unpause mgmt/non-qos\n",
3320 __func__);
3321 ol_txrx_peer_pause(peer); /* pause all tx queues */
3322 /* unpause mgmt and non-QoS tx queues */
3323 for (tid = OL_TX_NUM_QOS_TIDS;
3324 tid < OL_TX_NUM_TIDS; tid++)
3325 ol_txrx_peer_tid_unpause(peer, tid);
3326 }
3327 }
Mohit Khannab7bec722017-11-10 11:43:44 -08003328 peer_ref_cnt = ol_txrx_peer_release_ref(peer,
3329 PEER_DEBUG_ID_OL_INTERNAL);
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003330 /*
Mohit Khannab7bec722017-11-10 11:43:44 -08003331 * after ol_txrx_peer_release_ref, peer object cannot be accessed
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003332 * if the return code was 0
3333 */
Mohit Khannab04dfcd2017-02-13 18:54:35 -08003334 if (peer_ref_cnt > 0)
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003335 /*
3336 * Set the state after the pause to avoid a race condition
3337 * with the ADDBA check in the tx path
3338 */
3339 peer->state = state;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303340 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003341}
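/*
 * Illustrative sketch (not part of the original driver): a control-path
 * caller driving the state machine documented above.  After association
 * completes on an open (non-secure) BSS the peer goes straight to AUTH;
 * on a secured BSS it first goes to CONN and reaches AUTH once key
 * exchange finishes.  The guard macro and example_* names are
 * assumptions.
 */
#ifdef WLAN_OL_TXRX_DOC_EXAMPLES
static void example_assoc_done(struct cdp_pdev *ppdev, uint8_t *peer_mac,
			       bool secured)
{
	ol_txrx_peer_state_update(ppdev, peer_mac,
				  secured ? OL_TXRX_PEER_STATE_CONN
					  : OL_TXRX_PEER_STATE_AUTH);
}

static void example_keys_installed(struct cdp_pdev *ppdev, uint8_t *peer_mac)
{
	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_AUTH);
}
#endif /* WLAN_OL_TXRX_DOC_EXAMPLES */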
3342
3343void
3344ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
3345{
3346 peer->keyinstalled = val;
3347}
3348
3349void
3350ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
3351 uint8_t *peer_mac,
3352 union ol_txrx_peer_update_param_t *param,
3353 enum ol_txrx_peer_update_select_t select)
3354{
3355 struct ol_txrx_peer_t *peer;
3356
Mohit Khannab7bec722017-11-10 11:43:44 -08003357 peer = ol_txrx_peer_find_hash_find_get_ref(vdev->pdev, peer_mac, 0, 1,
3358 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003359 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303360 ol_txrx_dbg("%s: peer is null",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003361 __func__);
3362 return;
3363 }
3364
3365 switch (select) {
3366 case ol_txrx_peer_update_qos_capable:
3367 {
3368 /* save qos_capable here txrx peer,
3369 * when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO comes then save.
3370 */
3371 peer->qos_capable = param->qos_capable;
3372 /*
3373 * The following function call assumes that the peer has a
3374 * single ID. This is currently true, and
3375 * is expected to remain true.
3376 */
3377 htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
3378 peer->peer_ids[0],
3379 peer->qos_capable);
3380 break;
3381 }
3382 case ol_txrx_peer_update_uapsdMask:
3383 {
3384 peer->uapsd_mask = param->uapsd_mask;
3385 htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
3386 peer->peer_ids[0],
3387 peer->uapsd_mask);
3388 break;
3389 }
3390 case ol_txrx_peer_update_peer_security:
3391 {
3392 enum ol_sec_type sec_type = param->sec_type;
3393 enum htt_sec_type peer_sec_type = htt_sec_type_none;
3394
3395 switch (sec_type) {
3396 case ol_sec_type_none:
3397 peer_sec_type = htt_sec_type_none;
3398 break;
3399 case ol_sec_type_wep128:
3400 peer_sec_type = htt_sec_type_wep128;
3401 break;
3402 case ol_sec_type_wep104:
3403 peer_sec_type = htt_sec_type_wep104;
3404 break;
3405 case ol_sec_type_wep40:
3406 peer_sec_type = htt_sec_type_wep40;
3407 break;
3408 case ol_sec_type_tkip:
3409 peer_sec_type = htt_sec_type_tkip;
3410 break;
3411 case ol_sec_type_tkip_nomic:
3412 peer_sec_type = htt_sec_type_tkip_nomic;
3413 break;
3414 case ol_sec_type_aes_ccmp:
3415 peer_sec_type = htt_sec_type_aes_ccmp;
3416 break;
3417 case ol_sec_type_wapi:
3418 peer_sec_type = htt_sec_type_wapi;
3419 break;
3420 default:
3421 peer_sec_type = htt_sec_type_none;
3422 break;
3423 }
3424
3425 peer->security[txrx_sec_ucast].sec_type =
3426 peer->security[txrx_sec_mcast].sec_type =
3427 peer_sec_type;
3428
3429 break;
3430 }
3431 default:
3432 {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303433 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003434 "ERROR: unknown param %d in %s", select,
3435 __func__);
3436 break;
3437 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003438 } /* switch */
Mohit Khannab7bec722017-11-10 11:43:44 -08003439 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003440}
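/*
 * Illustrative sketch of driving ol_txrx_peer_update() above: the caller
 * fills one member of the parameter union and names it with the matching
 * selector.  Sketch only; the guard macro and example_* name are
 * assumptions.
 */
#ifdef WLAN_OL_TXRX_DOC_EXAMPLES
static void example_set_peer_security(ol_txrx_vdev_handle vdev,
				      uint8_t *peer_mac)
{
	union ol_txrx_peer_update_param_t param;

	param.sec_type = ol_sec_type_aes_ccmp;
	ol_txrx_peer_update(vdev, peer_mac, &param,
			    ol_txrx_peer_update_peer_security);
}
#endif /* WLAN_OL_TXRX_DOC_EXAMPLES */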
3441
3442uint8_t
3443ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3444{
3445
3446 struct ol_txrx_peer_t *peer;
Yun Parkeaea8632017-04-09 09:53:45 -07003447
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003448 peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3449 if (peer)
3450 return peer->uapsd_mask;
3451 return 0;
3452}
3453
3454uint8_t
3455ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3456{
3457
3458 struct ol_txrx_peer_t *peer_t =
3459 ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3460 if (peer_t != NULL)
3461 return peer_t->qos_capable;
3462 return 0;
3463}
3464
Mohit Khannab7bec722017-11-10 11:43:44 -08003465/**
Mohit Khannab7bec722017-11-10 11:43:44 -08003466 * ol_txrx_peer_free_tids() - free tids for the peer
3467 * @peer: peer handle
3468 *
3469 * Return: None
3470 */
3471static inline void ol_txrx_peer_free_tids(ol_txrx_peer_handle peer)
3472{
3473 int i = 0;
3474 /*
3475 * 'array' is allocated in the addba handler and is supposed to be
3476 * freed in the delba handler. There are cases (for example, SSR)
3477 * where the delba handler is not called. Because 'array' points
3478 * to the address of 'base' by default and is only reallocated in
3479 * the addba handler later, free the memory only when 'array' does
3480 * not point to 'base'.
3481 */
3482 for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
3483 if (peer->tids_rx_reorder[i].array !=
3484 &peer->tids_rx_reorder[i].base) {
3485 ol_txrx_dbg(
3486 "%s, delete reorder arr, tid:%d\n",
3487 __func__, i);
3488 qdf_mem_free(peer->tids_rx_reorder[i].array);
3489 ol_rx_reorder_init(&peer->tids_rx_reorder[i],
3490 (uint8_t)i);
3491 }
3492 }
3493}
3494
3495/**
3496 * ol_txrx_peer_release_ref() - release peer reference
3497 * @peer: peer handle
3498 *
3499 * Release peer reference and delete peer if refcount is 0
3500 *
wadesong9f2b1102017-12-20 22:58:35 +08003501 * Return: Resulting peer ref_cnt after this function is invoked
Mohit Khannab7bec722017-11-10 11:43:44 -08003502 */
3503int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
3504 enum peer_debug_id_type debug_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003505{
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003506 int rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003507 struct ol_txrx_vdev_t *vdev;
3508 struct ol_txrx_pdev_t *pdev;
Jingxiang Ge3badb982018-01-02 17:39:01 +08003509 bool ref_silent = false;
Jingxiang Ge190679b2018-01-30 08:56:19 +08003510 int access_list = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003511
3512 /* preconditions */
3513 TXRX_ASSERT2(peer);
3514
3515 vdev = peer->vdev;
3516 if (NULL == vdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303517 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003518 "The vdev is not present anymore\n");
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003519 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003520 }
3521
3522 pdev = vdev->pdev;
3523 if (NULL == pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303524 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003525 "The pdev is not present anymore\n");
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003526 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003527 }
3528
Mohit Khannab7bec722017-11-10 11:43:44 -08003529 if (debug_id >= PEER_DEBUG_ID_MAX || debug_id < 0) {
3530 ol_txrx_err("incorrect debug_id %d ", debug_id);
3531 return -EINVAL;
3532 }
3533
Jingxiang Ge3badb982018-01-02 17:39:01 +08003534 if (debug_id == PEER_DEBUG_ID_OL_RX_THREAD)
3535 ref_silent = true;
3536
3537 if (!ref_silent)
3538 wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
3539 DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
3540 peer, 0,
3541 qdf_atomic_read(&peer->ref_cnt));
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003542
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003543
3544 /*
3545 * Hold the lock all the way from checking if the peer ref count
3546 * is zero until the peer references are removed from the hash
3547 * table and vdev list (if the peer ref count is zero).
3548 * This protects against a new HL tx operation starting to use the
3549 * peer object just after this function concludes it's done being used.
3550 * Furthermore, the lock needs to be held while checking whether the
3551 * vdev's list of peers is empty, to make sure that list is not modified
3552 * concurrently with the empty check.
3553 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303554 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003555
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003556 /*
3557 * Check the reference count before deleting the peer,
3558 * since this function is sometimes re-entered,
3559 * which can lead to a deadlock.
3560 * (A double-free should never happen, so assert if it does.)
3561 */
3562 rc = qdf_atomic_read(&(peer->ref_cnt));
3563
3564 if (rc == 0) {
3565 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3566 ol_txrx_err("The Peer is not present anymore\n");
3567 qdf_assert(0);
3568 return -EACCES;
3569 }
3570 /*
3571 * now decrement rc; this will be the return code.
3572 * 0 : peer deleted
3573 * >0: peer ref removed, but still has other references
3574 * <0: sanity failed - no changes to the state of the peer
3575 */
3576 rc--;
3577
Mohit Khannab7bec722017-11-10 11:43:44 -08003578 if (!qdf_atomic_read(&peer->access_list[debug_id])) {
3579 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
jitiphil8ad8a6f2018-03-01 23:45:05 +05303580 ol_txrx_err("peer %pK ref was not taken by %d",
Mohit Khannab7bec722017-11-10 11:43:44 -08003581 peer, debug_id);
3582 ol_txrx_dump_peer_access_list(peer);
3583 QDF_BUG(0);
3584 return -EACCES;
3585 }
Mohit Khannab7bec722017-11-10 11:43:44 -08003586 qdf_atomic_dec(&peer->access_list[debug_id]);
3587
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07003588 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
Mohit Khannab7bec722017-11-10 11:43:44 -08003589 u16 peer_id;
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003590 wlan_roam_debug_log(vdev->vdev_id,
3591 DEBUG_DELETING_PEER_OBJ,
3592 DEBUG_INVALID_PEER_ID,
3593 &peer->mac_addr.raw, peer, 0,
3594 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003595 peer_id = peer->local_id;
3596 /* remove the reference to the peer from the hash table */
3597 ol_txrx_peer_find_hash_remove(pdev, peer);
3598
3599 /* remove the peer from its parent vdev's list */
3600 TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
3601
3602 /* cleanup the Rx reorder queues for this peer */
3603 ol_rx_peer_cleanup(vdev, peer);
3604
Jingxiang Ge3badb982018-01-02 17:39:01 +08003605 qdf_spinlock_destroy(&peer->peer_info_lock);
3606 qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
3607
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003608 /* peer is removed from peer_list */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303609 qdf_atomic_set(&peer->delete_in_progress, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003610
3611 /*
3612 * Set wait_delete_comp event if the current peer id matches
3613 * with registered peer id.
3614 */
3615 if (peer_id == vdev->wait_on_peer_id) {
Anurag Chouhance0dc992016-02-16 18:18:03 +05303616 qdf_event_set(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003617 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3618 }
3619
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07003620 qdf_timer_sync_cancel(&peer->peer_unmap_timer);
3621 qdf_timer_free(&peer->peer_unmap_timer);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003622
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003623 /* check whether the parent vdev has no peers left */
3624 if (TAILQ_EMPTY(&vdev->peer_list)) {
3625 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003626 * Check if the parent vdev was waiting for its peers
3627 * to be deleted, in order for it to be deleted too.
3628 */
3629 if (vdev->delete.pending) {
3630 ol_txrx_vdev_delete_cb vdev_delete_cb =
3631 vdev->delete.callback;
3632 void *vdev_delete_context =
3633 vdev->delete.context;
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303634 /*
3635 * Now that there are no references to the peer,
3636 * we can release the peer reference lock.
3637 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303638 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303639
gbian016a42e2017-03-01 18:49:11 +08003640 /*
Yun Parkeaea8632017-04-09 09:53:45 -07003641 * ol_tx_desc_free might access stale vdev
3642 * contents referenced by a tx desc, since
3643 * this vdev may be detached asynchronously
3644 * in another thread.
3645 *
3646 * Go through the tx desc pool and set each
3647 * corresponding tx desc's vdev to NULL when
3648 * detaching this vdev, and add a vdev check
3649 * in ol_tx_desc_free to avoid a crash.
3650 */
gbian016a42e2017-03-01 18:49:11 +08003651 ol_txrx_tx_desc_reset_vdev(vdev);
Poddar, Siddarth14521792017-03-14 21:19:42 +05303652 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003653 "%s: deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) - its last peer is done",
Yun Parkeaea8632017-04-09 09:53:45 -07003654 __func__, vdev,
3655 vdev->mac_addr.raw[0],
3656 vdev->mac_addr.raw[1],
3657 vdev->mac_addr.raw[2],
3658 vdev->mac_addr.raw[3],
3659 vdev->mac_addr.raw[4],
3660 vdev->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003661 /* all peers are gone, go ahead and delete it */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303662 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003663 if (vdev_delete_cb)
3664 vdev_delete_cb(vdev_delete_context);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303665 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303666 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003667 }
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303668 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303669 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303670 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003671
jitiphil8ad8a6f2018-03-01 23:45:05 +05303672 ol_txrx_info_high("[%d][%d]: Deleting peer %pK ref_cnt -> %d %s",
Mohit Khannab7bec722017-11-10 11:43:44 -08003673 debug_id,
3674 qdf_atomic_read(&peer->access_list[debug_id]),
3675 peer, rc,
3676 qdf_atomic_read(&peer->fw_create_pending)
3677 == 1 ?
3678 "(No Maps received)" : "");
Mohit Khanna8ee37c62017-08-07 17:15:20 -07003679
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303680 ol_txrx_peer_tx_queue_free(pdev, peer);
3681
Deepak Dhamdhereb0d2dda2017-04-03 01:01:50 -07003682 /* Remove mappings from peer_id to peer object */
3683 ol_txrx_peer_clear_map_peer(pdev, peer);
3684
wadesong9f2b1102017-12-20 22:58:35 +08003685 /* Remove peer pointer from local peer ID map */
3686 ol_txrx_local_peer_id_free(pdev, peer);
3687
Mohit Khannab7bec722017-11-10 11:43:44 -08003688 ol_txrx_peer_free_tids(peer);
3689
3690 ol_txrx_dump_peer_access_list(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003691
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303692 qdf_mem_free(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003693 } else {
Jingxiang Ge190679b2018-01-30 08:56:19 +08003694 access_list = qdf_atomic_read(
3695 &peer->access_list[debug_id]);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303696 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Jingxiang Ge3badb982018-01-02 17:39:01 +08003697 if (!ref_silent)
jitiphil8ad8a6f2018-03-01 23:45:05 +05303698 ol_txrx_info_high("[%d][%d]: ref delete peer %pK ref_cnt -> %d",
Jingxiang Ge3badb982018-01-02 17:39:01 +08003699 debug_id,
Jingxiang Ge190679b2018-01-30 08:56:19 +08003700 access_list,
Jingxiang Ge3badb982018-01-02 17:39:01 +08003701 peer, rc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003702 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003703 return rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003704}
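/*
 * Illustrative sketch of consuming the return value of
 * ol_txrx_peer_release_ref() above: a result of 0 means this call freed
 * the peer, so the pointer must not be dereferenced afterwards; a
 * positive value means other references remain; a negative value means
 * the sanity checks failed and nothing changed.  The guard macro and
 * example_* name are assumptions.
 */
#ifdef WLAN_OL_TXRX_DOC_EXAMPLES
static void example_done_with_peer(struct ol_txrx_peer_t *peer)
{
	int remaining;

	remaining = ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	if (remaining == 0) {
		/* peer memory has been freed; do not touch 'peer' again */
		return;
	}
	/* remaining > 0: other holders still reference the peer */
}
#endif /* WLAN_OL_TXRX_DOC_EXAMPLES */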
3705
Dhanashri Atre12a08392016-02-17 13:10:34 -08003706/**
Mohit Khanna0696eef2016-04-14 16:14:08 -07003707 * ol_txrx_clear_peer_internal() - ol internal function to clear peer
3708 * @peer: pointer to ol txrx peer structure
3709 *
3710 * Return: QDF Status
3711 */
3712static QDF_STATUS
3713ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
3714{
3715 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3716 /* Drop pending Rx frames in CDS */
3717 if (sched_ctx)
3718 cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3719
3720 /* Purge the cached rx frame queue */
3721 ol_txrx_flush_rx_frames(peer, 1);
3722
3723 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003724 peer->state = OL_TXRX_PEER_STATE_DISC;
3725 qdf_spin_unlock_bh(&peer->peer_info_lock);
3726
3727 return QDF_STATUS_SUCCESS;
3728}
3729
3730/**
3731 * ol_txrx_clear_peer() - clear peer
3732 * @sta_id: sta id
3733 *
3734 * Return: QDF Status
3735 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003736static QDF_STATUS ol_txrx_clear_peer(struct cdp_pdev *ppdev, uint8_t sta_id)
Mohit Khanna0696eef2016-04-14 16:14:08 -07003737{
3738 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003739 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003740
3741 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303742 ol_txrx_err("%s: Unable to find pdev!",
Mohit Khanna0696eef2016-04-14 16:14:08 -07003743 __func__);
3744 return QDF_STATUS_E_FAILURE;
3745 }
3746
3747 if (sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303748 ol_txrx_err("Invalid sta id %d", sta_id);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003749 return QDF_STATUS_E_INVAL;
3750 }
3751
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003752 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Kabilan Kannanfa163982018-01-30 12:03:41 -08003753
3754 /* Return success, if the peer is already cleared by
3755 * data path via peer detach function.
3756 */
Mohit Khanna0696eef2016-04-14 16:14:08 -07003757 if (!peer)
Kabilan Kannanfa163982018-01-30 12:03:41 -08003758 return QDF_STATUS_SUCCESS;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003759
3760 return ol_txrx_clear_peer_internal(peer);
3761
3762}
3763
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003764void peer_unmap_timer_work_function(void *param)
3765{
3766 WMA_LOGE("Enter: %s", __func__);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003767 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08003768 ol_txrx_dump_peer_access_list(param);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003769 wlan_roam_debug_dump_table();
Anurag Chouhan4085ff72017-10-05 18:09:56 +05303770 cds_trigger_recovery(QDF_PEER_UNMAP_TIMEDOUT);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003771}
3772
Mohit Khanna0696eef2016-04-14 16:14:08 -07003773/**
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003774 * peer_unmap_timer_handler() - peer unmap timer function
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003775 * @data: peer object pointer
3776 *
3777 * Return: none
3778 */
3779void peer_unmap_timer_handler(void *data)
3780{
3781 ol_txrx_peer_handle peer = (ol_txrx_peer_handle)data;
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003782 ol_txrx_pdev_handle txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003783
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003784 ol_txrx_err("all unmap events not received for peer %pK, ref_cnt %d",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003785 peer, qdf_atomic_read(&peer->ref_cnt));
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003786 ol_txrx_err("peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003787 peer,
3788 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3789 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3790 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Nachiket Kukadea48fd772017-07-28 18:48:57 +05303791 if (!cds_is_driver_recovering() && !cds_is_fw_down()) {
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003792 qdf_create_work(0, &txrx_pdev->peer_unmap_timer_work,
3793 peer_unmap_timer_work_function,
Naveen Rawat17c42a82018-02-01 19:18:27 -08003794 peer);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003795 qdf_sched_work(0, &txrx_pdev->peer_unmap_timer_work);
Deepak Dhamdhered42ab7c2017-04-13 19:32:16 -07003796 } else {
3797 ol_txrx_err("Recovery is in progress, ignore!");
3798 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003799}
3800
3801
3802/**
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003803 * ol_txrx_peer_detach() - Delete a peer's data object.
3804 * @ppeer - the peer object to detach
Naveen Rawatf4ada152017-09-05 14:56:12 -07003805 * @bitmap - bitmap indicating special handling of request.
Dhanashri Atre12a08392016-02-17 13:10:34 -08003806 *
3807 * When the host's control SW disassociates a peer, it calls
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003808 * this function to detach and delete the peer. The reference
Dhanashri Atre12a08392016-02-17 13:10:34 -08003809 * stored in the control peer object to the data peer
3810 * object (set up by a call to ol_peer_store()) is provided.
3811 *
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003812 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08003813 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003814static void ol_txrx_peer_detach(void *ppeer, uint32_t bitmap)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003815{
Leo Chang98726762016-10-28 11:07:18 -07003816 ol_txrx_peer_handle peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003817 struct ol_txrx_vdev_t *vdev = peer->vdev;
3818
3819 /* redirect peer's rx delivery function to point to a discard func */
3820 peer->rx_opt_proc = ol_rx_discard;
3821
3822 peer->valid = 0;
3823
Mohit Khanna0696eef2016-04-14 16:14:08 -07003824 /* flush all rx packets before clearing up the peer local_id */
3825 ol_txrx_clear_peer_internal(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003826
3827 /* debug print to dump rx reorder state */
3828 /* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
3829
Poddar, Siddarth14521792017-03-14 21:19:42 +05303830 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003831 "%s:peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003832 __func__, peer,
3833 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3834 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3835 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003836
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303837 qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003838 if (vdev->last_real_peer == peer)
3839 vdev->last_real_peer = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303840 qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003841 htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
3842
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003843 /*
3844 * set delete_in_progress to indicate that wma
3845 * is waiting for the unmap message for this peer
3846 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303847 qdf_atomic_set(&peer->delete_in_progress, 1);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003848
Lin Bai973e6922018-01-08 17:59:19 +08003849 if (!(bitmap & (1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER))) {
Naveen Rawatf4ada152017-09-05 14:56:12 -07003850 if (vdev->opmode == wlan_op_mode_sta) {
3851 qdf_mem_copy(&peer->vdev->last_peer_mac_addr,
3852 &peer->mac_addr,
3853 sizeof(union ol_txrx_align_mac_addr_t));
Abhishek Singh217d9782017-04-28 23:49:11 +05303854
Lin Bai973e6922018-01-08 17:59:19 +08003855 /*
3856 * Start a timer to track unmap events when the
3857 * sta peer gets deleted.
3858 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003859 qdf_timer_start(&peer->peer_unmap_timer,
3860 OL_TXRX_PEER_UNMAP_TIMEOUT);
Mohit Khannab7bec722017-11-10 11:43:44 -08003861 ol_txrx_info_high
3862 ("started peer_unmap_timer for peer %pK",
3863 peer);
Naveen Rawatf4ada152017-09-05 14:56:12 -07003864 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003865 }
3866
3867 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003868 * Remove the reference added during peer_attach.
3869 * The peer will still be left allocated until the
3870 * PEER_UNMAP message arrives to remove the other
3871 * reference, added by the PEER_MAP message.
3872 */
Mohit Khannab7bec722017-11-10 11:43:44 -08003873 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003874}
3875
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003876/**
3877 * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
Lin Bai973e6922018-01-08 17:59:19 +08003878 * @ppeer - the object to detach
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003879 *
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003880 * Detach a peer and force peer object to be removed. It is called during
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003881 * roaming scenario when the firmware has already deleted a peer.
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003882 * Remove it from the peer_id_to_object map. Peer object is actually freed
3883 * when last reference is deleted.
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003884 *
3885 * Return: None
3886 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003887static void ol_txrx_peer_detach_force_delete(void *ppeer)
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003888{
Leo Chang98726762016-10-28 11:07:18 -07003889 ol_txrx_peer_handle peer = ppeer;
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003890 ol_txrx_pdev_handle pdev = peer->vdev->pdev;
3891
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003892 ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d",
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003893 __func__, peer, qdf_atomic_read(&peer->ref_cnt));
3894
3895 /* Clear the peer_id_to_obj map entries */
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003896 ol_txrx_peer_remove_obj_map_entries(pdev, peer);
Lin Bai973e6922018-01-08 17:59:19 +08003897 ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003898}
3899
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003900/**
3901 * ol_txrx_dump_tx_desc() - dump tx desc total and free count
3902 * @pdev_handle: Pointer to txrx pdev
3903 *
3904 * Return: none
3905 */
3906static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3907{
3908 struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003909 uint32_t total, num_free;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003910
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303911 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3912 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3913 else
3914 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003915
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003916 num_free = ol_tx_get_total_free_desc(pdev);
3917
Kapil Gupta53d9b572017-06-28 17:53:25 +05303918 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303919 "total tx credit %d num_free %d",
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003920 total, num_free);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003921
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003922}
3923
3924/**
3925 * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3926 * @timeout: timeout in ms
3927 *
3928 * Wait for tx queue to be empty, return timeout error if
3929 * queue doesn't empty before timeout occurs.
3930 *
3931 * Return:
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303932 * QDF_STATUS_SUCCESS if the queue empties,
3933 * QDF_STATUS_E_TIMEOUT in case of timeout,
3934 * QDF_STATUS_E_FAULT in case of missing handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003935 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003936static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003937{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003938 struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003939
3940 if (txrx_pdev == NULL) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303941 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003942 "%s: txrx context is null", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303943 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003944 }
3945
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003946 while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
Anurag Chouhan512c7d52016-02-19 15:49:46 +05303947 qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003948 if (timeout <= 0) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303949 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303950 "%s: tx frames are pending", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003951 ol_txrx_dump_tx_desc(txrx_pdev);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303952 return QDF_STATUS_E_TIMEOUT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003953 }
3954 timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3955 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303956 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003957}
3958
3959#ifndef QCA_WIFI_3_0_EMU
Himanshu Agarwal83a87572017-05-25 14:09:50 +05303960#define SUSPEND_DRAIN_WAIT 500
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003961#else
3962#define SUSPEND_DRAIN_WAIT 3000
3963#endif
3964
Yue Ma1e11d792016-02-26 18:58:44 -08003965#ifdef FEATURE_RUNTIME_PM
3966/**
3967 * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
3968 * @ppdev: TXRX pdev context
3969 *
3970 * TXRX is ready to runtime suspend if there are no pending packets
3971 * in the tx queue.
3972 *
3973 * Return: QDF_STATUS
3974 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003975static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003976{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003977 struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07003978
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003979 if (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev))
Yue Ma1e11d792016-02-26 18:58:44 -08003980 return QDF_STATUS_E_BUSY;
3981 else
3982 return QDF_STATUS_SUCCESS;
3983}
3984
3985/**
3986 * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
3987 * @ppdev: TXRX pdev context
3988 *
3989 * This is a dummy function for symmetry.
3990 *
3991 * Return: QDF_STATUS_SUCCESS
3992 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003993static QDF_STATUS ol_txrx_runtime_resume(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003994{
3995 return QDF_STATUS_SUCCESS;
3996}
3997#endif
3998
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003999/**
4000 * ol_txrx_bus_suspend() - bus suspend
Dustin Brown7ff24dd2017-05-10 15:49:59 -07004001 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004002 *
4003 * Ensure that ol_txrx is ready for bus suspend
4004 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304005 * Return: QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004006 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07004007static QDF_STATUS ol_txrx_bus_suspend(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004008{
4009 return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
4010}
4011
4012/**
4013 * ol_txrx_bus_resume() - bus resume
Dustin Brown7ff24dd2017-05-10 15:49:59 -07004014 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004015 *
4016 * Dummy function for symmetry
4017 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304018 * Return: QDF_STATUS_SUCCESS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004019 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07004020static QDF_STATUS ol_txrx_bus_resume(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004021{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304022 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004023}
4024
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004025/**
4026 * ol_txrx_get_tx_pending - Get the number of pending transmit
4027 * frames that are awaiting completion.
4028 *
4029 * @ppdev - the data physical device object
4030 * Mainly used in the cleanup path to make sure all buffers have been freed
4031 *
4032 * Return: count of pending frames
4033 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004034int ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004035{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004036 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004037 uint32_t total;
4038
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304039 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
4040 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
4041 else
4042 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004043
Nirav Shah55b45a02016-01-21 10:00:16 +05304044 return total - ol_tx_get_total_free_desc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004045}
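/*
 * Worked example (illustrative numbers only): with a descriptor pool of
 * 1024 entries and 1000 descriptors currently free,
 * ol_txrx_get_tx_pending() reports 1024 - 1000 = 24 frames that are still
 * awaiting tx completion.
 */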
4046
4047void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
4048{
4049 ol_tx_desc_list tx_descs;
Yun Parkeaea8632017-04-09 09:53:45 -07004050 /*
4051 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05304052 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Yun Parkeaea8632017-04-09 09:53:45 -07004053 * which is the same as the normal data send completion path
4054 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004055 htt_tx_pending_discard(pdev_handle->htt_pdev);
4056
4057 TAILQ_INIT(&tx_descs);
4058 ol_tx_queue_discard(pdev_handle, true, &tx_descs);
4059 /* Discard Frames in Discard List */
4060 ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
4061
4062 ol_tx_discard_target_frms(pdev_handle);
4063}
4064
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004065static inline
4066uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
4067{
4068 return (uint64_t) ((size_t) req);
4069}
4070
4071static inline
4072struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
4073{
4074 return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
4075}
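/*
 * Sketch of the cookie round-trip (illustrative only): the request pointer
 * is packed into the 64-bit HTT cookie when a stats request is sent and
 * unpacked again in the completion path, so both sides refer to the same
 * request object.
 *
 *	struct ol_txrx_stats_req_internal *req, *same;
 *	uint64_t cookie;
 *
 *	req = qdf_mem_malloc(sizeof(*req));
 *	cookie = ol_txrx_stats_ptr_to_u64(req);
 *	same = ol_txrx_u64_to_stats_ptr(cookie);
 *	... here same == req ...
 */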
4076
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004077#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004078void
4079ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
4080 uint8_t cfg_stats_type, uint32_t cfg_val)
4081{
4082 uint64_t dummy_cookie = 0;
Yun Parkeaea8632017-04-09 09:53:45 -07004083
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004084 htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
4085 0 /* reset mask */,
4086 cfg_stats_type, cfg_val, dummy_cookie);
4087}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004088#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004089
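/**
 * ol_txrx_fw_stats_get() - send an HTT debug stats request to the target
 * @pvdev: vdev handle
 * @req: caller's stats request (upload/reset masks, print/copy options)
 * @per_vdev: per-vdev stats flag (not used by this implementation)
 * @response_expected: true if the request is to be tracked on the pdev
 *	req_list and completed later by ol_txrx_fw_stats_handler()
 *
 * Return: A_OK on success, A_NO_MEMORY or A_ERROR on failure
 */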
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004090static A_STATUS
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004091ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
Dhanashri Atre52f71332016-08-22 12:12:36 -07004092 bool per_vdev, bool response_expected)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004093{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004094 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004095 struct ol_txrx_pdev_t *pdev = vdev->pdev;
4096 uint64_t cookie;
4097 struct ol_txrx_stats_req_internal *non_volatile_req;
4098
4099 if (!pdev ||
4100 req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
4101 req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
4102 return A_ERROR;
4103 }
4104
4105 /*
4106 * Allocate a non-transient stats request object.
4107 * (The one provided as an argument is likely allocated on the stack.)
4108 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304109 non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004110 if (!non_volatile_req)
4111 return A_NO_MEMORY;
4112
4113 /* copy the caller's specifications */
4114 non_volatile_req->base = *req;
4115 non_volatile_req->serviced = 0;
4116 non_volatile_req->offset = 0;
4117
4118 /* use the non-volatile request object's address as the cookie */
4119 cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
4120
tfyu9fcabd72017-09-26 17:46:48 +08004121 if (response_expected) {
4122 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4123 TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
4124 pdev->req_list_depth++;
4125 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4126 }
4127
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004128 if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
4129 req->stats_type_upload_mask,
4130 req->stats_type_reset_mask,
4131 HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
4132 cookie)) {
tfyu9fcabd72017-09-26 17:46:48 +08004133 if (response_expected) {
4134 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4135 TAILQ_REMOVE(&pdev->req_list, non_volatile_req, req_list_elem);
4136 pdev->req_list_depth--;
4137 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4138 }
4139
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304140 qdf_mem_free(non_volatile_req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004141 return A_ERROR;
4142 }
4143
Nirav Shahd2310422016-01-21 18:58:06 +05304144 if (response_expected == false)
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304145 qdf_mem_free(non_volatile_req);
Nirav Shahd2310422016-01-21 18:58:06 +05304146
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004147 return A_OK;
4148}
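/*
 * Illustrative request sketch (placeholder vdev handle and values): ask the
 * target to upload one stats type and let ol_txrx_fw_stats_handler() print
 * it when the HTT response arrives; no copy-out buffer is supplied.
 *
 *	struct ol_txrx_stats_req req = { 0 };
 *
 *	req.stats_type_upload_mask = 1 << HTT_DBG_STATS_WAL_PDEV_TXRX;
 *	req.print.verbose = 1;
 *	if (ol_txrx_fw_stats_get(pvdev, &req, false, true) != A_OK)
 *		ol_txrx_err("fw stats request failed");
 */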
Dhanashri Atre12a08392016-02-17 13:10:34 -08004149
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004150void
4151ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
4152 uint64_t cookie, uint8_t *stats_info_list)
4153{
4154 enum htt_dbg_stats_type type;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07004155 enum htt_cmn_dbg_stats_type cmn_type = HTT_DBG_CMN_NUM_STATS_INVALID;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004156 enum htt_dbg_stats_status status;
4157 int length;
4158 uint8_t *stats_data;
tfyu9fcabd72017-09-26 17:46:48 +08004159 struct ol_txrx_stats_req_internal *req, *tmp;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004160 int more = 0;
tfyu9fcabd72017-09-26 17:46:48 +08004161 int found = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004162
4163 req = ol_txrx_u64_to_stats_ptr(cookie);
4164
tfyu9fcabd72017-09-26 17:46:48 +08004165 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4166 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4167 if (req == tmp) {
4168 found = 1;
4169 break;
4170 }
4171 }
4172 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4173
4174 if (!found) {
4175 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05304176 "req(%pK) from firmware can't be found in the list\n", req);
tfyu9fcabd72017-09-26 17:46:48 +08004177 return;
4178 }
4179
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004180 do {
4181 htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
4182 &length, &stats_data);
4183 if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
4184 break;
4185 if (status == HTT_DBG_STATS_STATUS_PRESENT ||
4186 status == HTT_DBG_STATS_STATUS_PARTIAL) {
4187 uint8_t *buf;
4188 int bytes = 0;
4189
4190 if (status == HTT_DBG_STATS_STATUS_PARTIAL)
4191 more = 1;
4192 if (req->base.print.verbose || req->base.print.concise)
4193 /* provide the header along with the data */
4194 htt_t2h_stats_print(stats_info_list,
4195 req->base.print.concise);
4196
4197 switch (type) {
4198 case HTT_DBG_STATS_WAL_PDEV_TXRX:
4199 bytes = sizeof(struct wlan_dbg_stats);
4200 if (req->base.copy.buf) {
4201 int lmt;
4202
4203 lmt = sizeof(struct wlan_dbg_stats);
4204 if (req->base.copy.byte_limit < lmt)
4205 lmt = req->base.copy.byte_limit;
4206 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304207 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004208 }
4209 break;
4210 case HTT_DBG_STATS_RX_REORDER:
4211 bytes = sizeof(struct rx_reorder_stats);
4212 if (req->base.copy.buf) {
4213 int lmt;
4214
4215 lmt = sizeof(struct rx_reorder_stats);
4216 if (req->base.copy.byte_limit < lmt)
4217 lmt = req->base.copy.byte_limit;
4218 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304219 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004220 }
4221 break;
4222 case HTT_DBG_STATS_RX_RATE_INFO:
4223 bytes = sizeof(wlan_dbg_rx_rate_info_t);
4224 if (req->base.copy.buf) {
4225 int lmt;
4226
4227 lmt = sizeof(wlan_dbg_rx_rate_info_t);
4228 if (req->base.copy.byte_limit < lmt)
4229 lmt = req->base.copy.byte_limit;
4230 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304231 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004232 }
4233 break;
4234
4235 case HTT_DBG_STATS_TX_RATE_INFO:
4236 bytes = sizeof(wlan_dbg_tx_rate_info_t);
4237 if (req->base.copy.buf) {
4238 int lmt;
4239
4240 lmt = sizeof(wlan_dbg_tx_rate_info_t);
4241 if (req->base.copy.byte_limit < lmt)
4242 lmt = req->base.copy.byte_limit;
4243 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304244 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004245 }
4246 break;
4247
4248 case HTT_DBG_STATS_TX_PPDU_LOG:
4249 bytes = 0;
4250 /* TO DO: specify how many bytes are present */
4251 /* TO DO: add copying to the requestor's buf */
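			/* note: there is no break here, so execution falls
			 * through to HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO
			 */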
4252
4253 case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07004254 bytes = sizeof(struct
4255 rx_remote_buffer_mgmt_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004256 if (req->base.copy.buf) {
4257 int limit;
4258
Yun Parkeaea8632017-04-09 09:53:45 -07004259 limit = sizeof(struct
4260 rx_remote_buffer_mgmt_stats);
4261 if (req->base.copy.byte_limit < limit)
4262 limit = req->base.copy.
4263 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004264 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304265 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004266 }
4267 break;
4268
4269 case HTT_DBG_STATS_TXBF_INFO:
4270 bytes = sizeof(struct wlan_dbg_txbf_data_stats);
4271 if (req->base.copy.buf) {
4272 int limit;
4273
Yun Parkeaea8632017-04-09 09:53:45 -07004274 limit = sizeof(struct
4275 wlan_dbg_txbf_data_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004276 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004277 limit = req->base.copy.
4278 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004279 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304280 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004281 }
4282 break;
4283
4284 case HTT_DBG_STATS_SND_INFO:
4285 bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
4286 if (req->base.copy.buf) {
4287 int limit;
4288
Yun Parkeaea8632017-04-09 09:53:45 -07004289 limit = sizeof(struct
4290 wlan_dbg_txbf_snd_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004291 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004292 limit = req->base.copy.
4293 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004294 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304295 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004296 }
4297 break;
4298
4299 case HTT_DBG_STATS_TX_SELFGEN_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07004300 bytes = sizeof(struct
4301 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004302 if (req->base.copy.buf) {
4303 int limit;
4304
Yun Parkeaea8632017-04-09 09:53:45 -07004305 limit = sizeof(struct
4306 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004307 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004308 limit = req->base.copy.
4309 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004310 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304311 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004312 }
4313 break;
4314
4315 case HTT_DBG_STATS_ERROR_INFO:
4316 bytes =
4317 sizeof(struct wlan_dbg_wifi2_error_stats);
4318 if (req->base.copy.buf) {
4319 int limit;
4320
Yun Parkeaea8632017-04-09 09:53:45 -07004321 limit = sizeof(struct
4322 wlan_dbg_wifi2_error_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004323 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004324 limit = req->base.copy.
4325 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004326 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304327 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004328 }
4329 break;
4330
4331 case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
4332 bytes =
4333 sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
4334 if (req->base.copy.buf) {
4335 int limit;
4336
4337 limit = sizeof(struct
4338 rx_txbf_musu_ndpa_pkts_stats);
4339 if (req->base.copy.byte_limit < limit)
4340 limit =
4341 req->base.copy.byte_limit;
4342 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304343 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004344 }
4345 break;
4346
4347 default:
4348 break;
4349 }
Yun Parkeaea8632017-04-09 09:53:45 -07004350 buf = req->base.copy.buf ?
4351 req->base.copy.buf : stats_data;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07004352
4353 /* Not implemented for MCL */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004354 if (req->base.callback.fp)
4355 req->base.callback.fp(req->base.callback.ctxt,
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07004356 cmn_type, buf, bytes);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004357 }
4358 stats_info_list += length;
4359 } while (1);
4360
4361 if (!more) {
tfyu9fcabd72017-09-26 17:46:48 +08004362 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4363 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4364 if (req == tmp) {
4365 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
4366 pdev->req_list_depth--;
4367 qdf_mem_free(req);
4368 break;
4369 }
4370 }
4371 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004372 }
4373}
4374
4375#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
4376int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
4377{
4378 if (debug_specs & TXRX_DBG_MASK_OBJS) {
4379#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4380 ol_txrx_pdev_display(vdev->pdev, 0);
4381#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304382 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304383 "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004384#endif
4385 }
Yun Parkeaea8632017-04-09 09:53:45 -07004386 if (debug_specs & TXRX_DBG_MASK_STATS)
Mohit Khannaca4173b2017-09-12 21:52:19 -07004387 ol_txrx_stats_display(vdev->pdev,
4388 QDF_STATS_VERBOSITY_LEVEL_HIGH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004389 if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
4390#if defined(ENABLE_TXRX_PROT_ANALYZE)
4391 ol_txrx_prot_ans_display(vdev->pdev);
4392#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304393 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304394 "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004395#endif
4396 }
4397 if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
4398#if defined(ENABLE_RX_REORDER_TRACE)
4399 ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
4400#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304401 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304402 "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004403#endif
4404
4405 }
4406 return 0;
4407}
4408#endif
4409
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004410#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004411int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
4412 int max_subfrms_ampdu, int max_subfrms_amsdu)
4413{
4414 return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
4415 max_subfrms_ampdu, max_subfrms_amsdu);
4416}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004417#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004418
4419#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4420void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
4421{
4422 struct ol_txrx_vdev_t *vdev;
4423
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304424 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004425 "%*s%s:\n", indent, " ", "txrx pdev");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304426 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004427 "%*spdev object: %pK", indent + 4, " ", pdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304428 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004429 "%*svdev list:", indent + 4, " ");
4430 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304431 ol_txrx_vdev_display(vdev, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004432 }
4433 ol_txrx_peer_find_display(pdev, indent + 4);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304434 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004435 "%*stx desc pool: %d elems @ %pK", indent + 4, " ",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004436 pdev->tx_desc.pool_size, pdev->tx_desc.array);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304437 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004438 htt_display(pdev->htt_pdev, indent);
4439}
4440
4441void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
4442{
4443 struct ol_txrx_peer_t *peer;
4444
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304445 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004446 "%*stxrx vdev: %pK\n", indent, " ", vdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304447 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004448 "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304449 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004450 "%*sMAC addr: %d:%d:%d:%d:%d:%d",
4451 indent + 4, " ",
4452 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
4453 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
4454 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304455 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004456 "%*speer list:", indent + 4, " ");
4457 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304458 ol_txrx_peer_display(peer, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004459 }
4460}
4461
4462void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
4463{
4464 int i;
4465
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304466 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004467 "%*stxrx peer: %pK", indent, " ", peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004468 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
4469 if (peer->peer_ids[i] != HTT_INVALID_PEER) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304470 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004471 "%*sID: %d", indent + 4, " ",
4472 peer->peer_ids[i]);
4473 }
4474 }
4475}
4476#endif /* TXRX_DEBUG_LEVEL */
4477
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004478/**
4479 * ol_txrx_stats() - write ol layer tx queue stats for a vdev into a buffer
4480 * @vdev_id: vdev_id
4481 * @buffer: pointer to buffer
4482 * @buf_len: length of the buffer
4483 *
4484 * Return: length of string
4485 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004486static int
Yun Parkeaea8632017-04-09 09:53:45 -07004487ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004488{
4489 uint32_t len = 0;
4490
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004491 struct ol_txrx_vdev_t *vdev =
4492 (struct ol_txrx_vdev_t *)
4493 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004494
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004495 if (!vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304496 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304497 "%s: vdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004498 snprintf(buffer, buf_len, "vdev not found");
4499 return len;
4500 }
4501
4502 len = scnprintf(buffer, buf_len,
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004503 "\n\nTXRX stats:\nllQueue State : %s\npause %u unpause %u\noverflow %u\nllQueue timer state : %s",
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304504 ((vdev->ll_pause.is_q_paused == false) ?
4505 "UNPAUSED" : "PAUSED"),
4506 vdev->ll_pause.q_pause_cnt,
4507 vdev->ll_pause.q_unpause_cnt,
4508 vdev->ll_pause.q_overflow_cnt,
4509 ((vdev->ll_pause.is_q_timer_on == false)
4510 ? "NOT-RUNNING" : "RUNNING"));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004511 return len;
4512}
4513
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004514#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
4515/**
4516 * ol_txrx_disp_peer_cached_bufq_stats() - display peer cached_bufq stats
4517 * @peer: peer pointer
4518 *
4519 * Return: None
4520 */
4521static void ol_txrx_disp_peer_cached_bufq_stats(struct ol_txrx_peer_t *peer)
4522{
Mohit Khannaca4173b2017-09-12 21:52:19 -07004523 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4524 "cached_bufq: curr %d drops %d hwm %d whatifs %d thresh %d",
4525 peer->bufq_info.curr,
4526 peer->bufq_info.dropped,
4527 peer->bufq_info.high_water_mark,
4528 peer->bufq_info.qdepth_no_thresh,
4529 peer->bufq_info.thresh);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004530}
4531
4532/**
4533 * ol_txrx_disp_peer_stats() - display peer stats
4534 * @pdev: pdev pointer
4535 *
4536 * Return: None
4537 */
4538static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4539{
	int i;
4540 struct ol_txrx_peer_t *peer;
4541 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
4542
4543 if (osc && hif_is_load_or_unload_in_progress(HIF_GET_SOFTC(osc)))
4544 return;
4545
4546 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
4547 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4548 peer = pdev->local_peer_ids.map[i];
Frank Liu4362e462018-01-16 11:51:55 +08004549 if (peer) {
4550 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khannab7bec722017-11-10 11:43:44 -08004551 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Frank Liu4362e462018-01-16 11:51:55 +08004552 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
4553 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004554 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4555
4556 if (peer) {
4557 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004558 "stats: peer 0x%pK local peer id %d", peer, i);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004559 ol_txrx_disp_peer_cached_bufq_stats(peer);
Mohit Khannab7bec722017-11-10 11:43:44 -08004560 ol_txrx_peer_release_ref(peer,
4561 PEER_DEBUG_ID_OL_INTERNAL);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004562 }
4563 }
4564}
4565#else
4566static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4567{
4568 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Mohit Khannaca4173b2017-09-12 21:52:19 -07004569 "peer stats not supported w/o QCA_SUPPORT_TXRX_LOCAL_PEER_ID");
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004570}
4571#endif
4572
Mohit Khannaca4173b2017-09-12 21:52:19 -07004573void ol_txrx_stats_display(ol_txrx_pdev_handle pdev,
4574 enum qdf_stats_verbosity_level level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004575{
Mohit Khannaca4173b2017-09-12 21:52:19 -07004576 u64 tx_dropped =
4577 pdev->stats.pub.tx.dropped.download_fail.pkts
4578 + pdev->stats.pub.tx.dropped.target_discard.pkts
4579 + pdev->stats.pub.tx.dropped.no_ack.pkts
4580 + pdev->stats.pub.tx.dropped.others.pkts;
4581
4582 if (level == QDF_STATS_VERBOSITY_LEVEL_LOW) {
4583 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4584 "STATS |%u %u|TX: %lld tso %lld ok %lld drops(%u-%lld %u-%lld %u-%lld ?-%lld hR-%lld)|RX: %lld drops(E %lld PI %lld ME %lld) fwd(S %d F %d SF %d)|",
4585 pdev->tx_desc.num_free,
4586 pdev->tx_desc.pool_size,
4587 pdev->stats.pub.tx.from_stack.pkts,
4588 pdev->stats.pub.tx.tso.tso_pkts.pkts,
4589 pdev->stats.pub.tx.delivered.pkts,
4590 htt_tx_status_download_fail,
4591 pdev->stats.pub.tx.dropped.download_fail.pkts,
4592 htt_tx_status_discard,
4593 pdev->stats.pub.tx.dropped.target_discard.pkts,
4594 htt_tx_status_no_ack,
4595 pdev->stats.pub.tx.dropped.no_ack.pkts,
4596 pdev->stats.pub.tx.dropped.others.pkts,
4597 pdev->stats.pub.tx.dropped.host_reject.pkts,
4598 pdev->stats.pub.rx.delivered.pkts,
4599 pdev->stats.pub.rx.dropped_err.pkts,
4600 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4601 pdev->stats.pub.rx.dropped_mic_err.pkts,
4602 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4603 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4604 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
4605 return;
4606 }
4607
4608 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304609 "TX PATH Statistics:");
Mohit Khannaca4173b2017-09-12 21:52:19 -07004610 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shahda008342016-05-17 18:50:40 +05304611 "sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
4612 pdev->stats.pub.tx.from_stack.pkts,
4613 pdev->stats.pub.tx.from_stack.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004614 pdev->stats.pub.tx.dropped.host_reject.pkts,
4615 pdev->stats.pub.tx.dropped.host_reject.bytes,
Mohit Khannaca4173b2017-09-12 21:52:19 -07004616 tx_dropped,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004617 pdev->stats.pub.tx.dropped.download_fail.bytes
4618 + pdev->stats.pub.tx.dropped.target_discard.bytes
4619 + pdev->stats.pub.tx.dropped.no_ack.bytes);
Mohit Khannaca4173b2017-09-12 21:52:19 -07004620 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4621 "successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B) others: %lld (%lld B)",
Nirav Shahda008342016-05-17 18:50:40 +05304622 pdev->stats.pub.tx.delivered.pkts,
4623 pdev->stats.pub.tx.delivered.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004624 pdev->stats.pub.tx.dropped.download_fail.pkts,
4625 pdev->stats.pub.tx.dropped.download_fail.bytes,
4626 pdev->stats.pub.tx.dropped.target_discard.pkts,
4627 pdev->stats.pub.tx.dropped.target_discard.bytes,
4628 pdev->stats.pub.tx.dropped.no_ack.pkts,
Mohit Khannaca4173b2017-09-12 21:52:19 -07004629 pdev->stats.pub.tx.dropped.no_ack.bytes,
4630 pdev->stats.pub.tx.dropped.others.pkts,
4631 pdev->stats.pub.tx.dropped.others.bytes);
4632 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shahda008342016-05-17 18:50:40 +05304633 "Tx completions per HTT message:\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004634 "Single Packet %d\n"
4635 " 2-10 Packets %d\n"
4636 "11-20 Packets %d\n"
4637 "21-30 Packets %d\n"
4638 "31-40 Packets %d\n"
4639 "41-50 Packets %d\n"
4640 "51-60 Packets %d\n"
4641 " 60+ Packets %d\n",
4642 pdev->stats.pub.tx.comp_histogram.pkts_1,
4643 pdev->stats.pub.tx.comp_histogram.pkts_2_10,
4644 pdev->stats.pub.tx.comp_histogram.pkts_11_20,
4645 pdev->stats.pub.tx.comp_histogram.pkts_21_30,
4646 pdev->stats.pub.tx.comp_histogram.pkts_31_40,
4647 pdev->stats.pub.tx.comp_histogram.pkts_41_50,
4648 pdev->stats.pub.tx.comp_histogram.pkts_51_60,
4649 pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
Nirav Shahda008342016-05-17 18:50:40 +05304650
Mohit Khannaca4173b2017-09-12 21:52:19 -07004651 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304652 "RX PATH Statistics:");
Mohit Khannaca4173b2017-09-12 21:52:19 -07004653 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304654 "%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
Nirav Shahda008342016-05-17 18:50:40 +05304655 "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
4656 "msdus with frag_ind: %d msdus with offload_ind: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004657 pdev->stats.priv.rx.normal.ppdus,
4658 pdev->stats.priv.rx.normal.mpdus,
4659 pdev->stats.pub.rx.delivered.pkts,
4660 pdev->stats.pub.rx.delivered.bytes,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304661 pdev->stats.pub.rx.dropped_err.pkts,
4662 pdev->stats.pub.rx.dropped_err.bytes,
4663 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4664 pdev->stats.pub.rx.dropped_peer_invalid.bytes,
4665 pdev->stats.pub.rx.dropped_mic_err.pkts,
Nirav Shahda008342016-05-17 18:50:40 +05304666 pdev->stats.pub.rx.dropped_mic_err.bytes,
4667 pdev->stats.pub.rx.msdus_with_frag_ind,
4668 pdev->stats.pub.rx.msdus_with_offload_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004669
Mohit Khannaca4173b2017-09-12 21:52:19 -07004670 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004671 " fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
4672 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4673 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4674 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304675
Mohit Khannaca4173b2017-09-12 21:52:19 -07004676 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shahda008342016-05-17 18:50:40 +05304677 "Rx packets per HTT message:\n"
Nirav Shah6a4eee62016-04-25 10:15:04 +05304678 "Single Packet %d\n"
4679 " 2-10 Packets %d\n"
4680 "11-20 Packets %d\n"
4681 "21-30 Packets %d\n"
4682 "31-40 Packets %d\n"
4683 "41-50 Packets %d\n"
4684 "51-60 Packets %d\n"
4685 " 60+ Packets %d\n",
4686 pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4687 pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4688 pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4689 pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4690 pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4691 pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4692 pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4693 pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004694
4695 ol_txrx_disp_peer_stats(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004696}
4697
4698void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4699{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304700 qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004701}
4702
4703#if defined(ENABLE_TXRX_PROT_ANALYZE)
4704
4705void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4706{
4707 ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4708 ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4709}
4710
4711#endif /* ENABLE_TXRX_PROT_ANALYZE */
4712
4713#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
4714int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4715{
4716 return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4717 OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4718}
4719#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4720
4721#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
4722A_STATUS
4723ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4724 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4725{
Anurag Chouhanc5548422016-02-24 18:33:27 +05304726 qdf_assert(pdev && peer && stats);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304727 qdf_spin_lock_bh(&pdev->peer_stat_mutex);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304728 qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304729 qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004730 return A_OK;
4731}
4732#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4733
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004734static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *pvdev, bool val)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004735{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004736 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004737
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004738 if (NULL == vdev)
4739 return;
4740
4741 vdev->disable_intrabss_fwd = val;
4742}
4743
Nirav Shahc657ef52016-07-26 14:22:38 +05304744/**
4745 * ol_txrx_update_mac_id() - update mac_id for vdev
4746 * @vdev_id: vdev id
4747 * @mac_id: mac id
4748 *
4749 * Return: none
4750 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004751static void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id)
Nirav Shahc657ef52016-07-26 14:22:38 +05304752{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004753 struct ol_txrx_vdev_t *vdev =
4754 (struct ol_txrx_vdev_t *)
4755 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Nirav Shahc657ef52016-07-26 14:22:38 +05304756
4757 if (NULL == vdev) {
4758 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4759 "%s: Invalid vdev_id %d", __func__, vdev_id);
4760 return;
4761 }
4762 vdev->mac_id = mac_id;
4763}
4764
Alok Kumar75355aa2018-03-19 17:32:58 +05304765/**
4766 * ol_txrx_get_tx_ack_stats() - get tx ack count
4767 * @vdev_id: vdev_id
4768 *
4769 * Return: tx ack count
4770 */
4771static uint32_t ol_txrx_get_tx_ack_stats(uint8_t vdev_id)
4772{
4773 struct ol_txrx_vdev_t *vdev =
4774 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
4775 if (!vdev) {
4776 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4777 "%s: Invalid vdev_id %d", __func__, vdev_id);
4778 return 0;
4779 }
4780 return vdev->txrx_stats.txack_success;
4781}
4782
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004783#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
4784
4785/**
4786 * ol_txrx_get_vdev_from_sta_id() - get vdev from sta_id
4787 * @sta_id: sta_id
4788 *
4789 * Return: vdev handle
4790 * NULL if not found.
4791 */
4792static ol_txrx_vdev_handle ol_txrx_get_vdev_from_sta_id(uint8_t sta_id)
4793{
4794 struct ol_txrx_peer_t *peer = NULL;
4795 ol_txrx_pdev_handle pdev = NULL;
4796
4797 if (sta_id >= WLAN_MAX_STA_COUNT) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304798 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304799 "Invalid sta id passed");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004800 return NULL;
4801 }
4802
Anurag Chouhan6d760662016-02-20 16:05:43 +05304803 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004804 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304805 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304806 "PDEV not found for sta_id [%d]", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004807 return NULL;
4808 }
4809
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004810 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004811
4812 if (!peer) {
Zhu Jianminf7ffe942017-08-24 10:24:15 +08004813 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304814 "PEER [%d] not found", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004815 return NULL;
4816 }
4817
4818 return peer->vdev;
4819}
4820
4821/**
4822 * ol_txrx_register_tx_flow_control() - register tx flow control callback
4823 * @vdev_id: vdev_id
4824 * @flowControl: flow control callback
4825 * @osif_fc_ctx: callback context
bings284f8be2017-08-11 10:41:30 +08004826 * @flow_control_is_pause: is vdev paused by flow control
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004827 *
Jeff Johnson5ead5ab2018-05-06 00:11:08 -07004828 * Return: 0 for success or error code
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004829 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004830static int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
bings284f8be2017-08-11 10:41:30 +08004831 ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
4832 ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004833{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004834 struct ol_txrx_vdev_t *vdev =
4835 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004836
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004837 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304838 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304839 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004840 return -EINVAL;
4841 }
4842
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304843 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004844 vdev->osif_flow_control_cb = flowControl;
bings284f8be2017-08-11 10:41:30 +08004845 vdev->osif_flow_control_is_pause = flow_control_is_pause;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004846 vdev->osif_fc_ctx = osif_fc_ctx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304847 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004848 return 0;
4849}
4850
4851/**
Yun Parkeaea8632017-04-09 09:53:45 -07004852 * ol_txrx_deregister_tx_flow_control_cb() - deregister tx flow control
4853 * callback
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004854 * @vdev_id: vdev_id
4855 *
4856 * Return: 0 for success or error code
4857 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004858static int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004859{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004860 struct ol_txrx_vdev_t *vdev =
4861 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004862
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004863 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304864 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304865 "%s: Invalid vdev_id", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004866 return -EINVAL;
4867 }
4868
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304869 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004870 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08004871 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004872 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304873 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004874 return 0;
4875}
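/*
 * Illustrative pairing (assumed OS-interface caller; my_flow_control_cb,
 * my_is_pause_cb and my_ctx are placeholders for the caller's
 * ol_txrx_tx_flow_control_fp / ol_txrx_tx_flow_control_is_pause_fp
 * implementations and context):
 *
 *	ol_txrx_register_tx_flow_control(vdev_id, my_flow_control_cb,
 *					 my_ctx, my_is_pause_cb);
 *	... data path runs; txrx invokes the callbacks as needed ...
 *	ol_txrx_deregister_tx_flow_control_cb(vdev_id);
 */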
4876
4877/**
4878 * ol_txrx_get_tx_resource() - if tx resource less than low_watermark
4879 * @sta_id: sta id
4880 * @low_watermark: low watermark
4881 * @high_watermark_offset: high watermark offset value
4882 *
4883 * Return: true/false
4884 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004885static bool
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004886ol_txrx_get_tx_resource(uint8_t sta_id,
4887 unsigned int low_watermark,
4888 unsigned int high_watermark_offset)
4889{
4890 ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004891
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004892 if (NULL == vdev) {
Zhu Jianminf7ffe942017-08-24 10:24:15 +08004893 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304894 "%s: Invalid sta_id %d", __func__, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004895		/* Return true so the caller does not conclude that the
4896		 * resource count is below low_watermark.
4897		 * sta_id validation will be done in ol_tx_send_data_frame,
4898		 * and if the sta_id is not registered the host will drop
4899		 * the packet.
4900		 */
4901 return true;
4902 }
4903
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304904 qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304905
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004906 if (vdev->pdev->tx_desc.num_free < (uint16_t) low_watermark) {
4907 vdev->tx_fl_lwm = (uint16_t) low_watermark;
4908 vdev->tx_fl_hwm =
4909 (uint16_t) (low_watermark + high_watermark_offset);
4910 /* Not enough free resource, stop TX OS Q */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05304911 qdf_atomic_set(&vdev->os_q_paused, 1);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304912 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004913 return false;
4914 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304915 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004916 return true;
4917}
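/*
 * Illustrative check (placeholder watermarks): a false return means the
 * free descriptor count fell below the low watermark and the OS tx queue
 * for this vdev has been paused; it is expected to be resumed by the tx
 * completion path once the free count climbs back above
 * low_watermark + high_watermark_offset.
 *
 *	if (!ol_txrx_get_tx_resource(sta_id, 100, 50))
 *		return;
 */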
4918
4919/**
4920 * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
4921 * @vdev_id: vdev id
4922 * @pause_q_depth: pause queue depth
4923 *
4924 * Return: 0 for success or error code
4925 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004926static int
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004927ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
4928{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004929 struct ol_txrx_vdev_t *vdev =
4930 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004931
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004932 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304933 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304934 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004935 return -EINVAL;
4936 }
4937
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304938 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004939 vdev->ll_pause.max_q_depth = pause_q_depth;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304940 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004941
4942 return 0;
4943}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004944#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
4945
Leo Chang8e073612015-11-13 10:55:34 -08004946/**
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004947 * ol_txrx_display_stats() - Display OL TXRX stats
 * @soc: soc handle
4948 * @value: Module id for which stats need to be displayed
 * @verb_level: verbosity level for the stats output
Nirav Shahda008342016-05-17 18:50:40 +05304949 *
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004950 * Return: status
Nirav Shahda008342016-05-17 18:50:40 +05304951 */
Mohit Khannaca4173b2017-09-12 21:52:19 -07004952static QDF_STATUS
4953ol_txrx_display_stats(void *soc, uint16_t value,
4954 enum qdf_stats_verbosity_level verb_level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004955{
4956 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004957 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004958
Anurag Chouhan6d760662016-02-20 16:05:43 +05304959 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004960 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304961 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304962 "%s: pdev is NULL", __func__);
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004963 return QDF_STATUS_E_NULL_VALUE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004964 }
4965
4966 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004967 case CDP_TXRX_PATH_STATS:
Mohit Khannaca4173b2017-09-12 21:52:19 -07004968 ol_txrx_stats_display(pdev, verb_level);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004969 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004970 case CDP_TXRX_TSO_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004971 ol_txrx_stats_display_tso(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004972 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004973 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07004974 ol_tx_dump_flow_pool_info((void *)pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004975 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004976 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304977 qdf_nbuf_tx_desc_count_display();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004978 break;
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -08004979 case CDP_WLAN_RX_BUF_DEBUG_STATS:
4980 htt_display_rx_buf_debug(pdev->htt_pdev);
4981 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304982#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004983 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304984 ol_tx_sched_cur_state_display(pdev);
4985 ol_tx_sched_stats_display(pdev);
4986 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004987 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304988 ol_tx_queue_log_display(pdev);
4989 break;
4990#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004991 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304992 ol_tx_dump_group_credit_stats(pdev);
4993 break;
4994#endif
4995
4996#ifdef DEBUG_HL_LOGGING
Nirav Shaheb017be2018-02-15 11:20:58 +05304997 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304998 htt_dump_bundle_stats(pdev->htt_pdev);
4999 break;
5000#endif
5001#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005002 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07005003 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005004 break;
5005 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07005006 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005007}
5008
Mohit Khanna3e2115b2016-10-11 13:18:29 -07005009/**
5010 * ol_txrx_clear_stats() - Clear OL TXRX stats
 * @value: module id for which stats need to be cleared
5012 *
5013 * Return: None
5014 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005015static void ol_txrx_clear_stats(uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005016{
5017 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07005018 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005019
Anurag Chouhan6d760662016-02-20 16:05:43 +05305020 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005021 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05305022 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305023 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005024 return;
5025 }
5026
5027 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005028 case CDP_TXRX_PATH_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005029 ol_txrx_stats_clear(pdev);
5030 break;
Yun Park1027e8c2017-10-13 15:17:37 -07005031 case CDP_TXRX_TSO_STATS:
5032 ol_txrx_tso_stats_clear(pdev);
5033 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005034 case CDP_DUMP_TX_FLOW_POOL_INFO:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005035 ol_tx_clear_flow_pool_stats();
5036 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005037 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05305038 qdf_nbuf_tx_desc_count_clear();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005039 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305040#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005041 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305042 ol_tx_sched_stats_clear(pdev);
5043 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005044 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305045 ol_tx_queue_log_clear(pdev);
5046 break;
5047#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005048 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305049 ol_tx_clear_group_credit_stats(pdev);
5050 break;
5051#endif
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005052 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305053 htt_clear_bundle_stats(pdev->htt_pdev);
5054 break;
5055#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005056 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07005057 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005058 break;
5059 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07005060
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005061}
5062
5063/**
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005064 * ol_txrx_drop_nbuf_list() - drop an nbuf list
 * @buf_list: buffer list to be dropped
5066 *
5067 * Return: int (number of bufs dropped)
5068 */
5069static inline int ol_txrx_drop_nbuf_list(qdf_nbuf_t buf_list)
5070{
5071 int num_dropped = 0;
5072 qdf_nbuf_t buf, next_buf;
5073 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5074
5075 buf = buf_list;
5076 while (buf) {
Himanshu Agarwaldd2196a2017-07-31 11:38:14 +05305077 QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005078 next_buf = qdf_nbuf_queue_next(buf);
5079 if (pdev)
5080 TXRX_STATS_MSDU_INCR(pdev,
5081 rx.dropped_peer_invalid, buf);
5082 qdf_nbuf_free(buf);
5083 buf = next_buf;
5084 num_dropped++;
5085 }
5086 return num_dropped;
5087}
5088
5089/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005090 * ol_rx_data_cb() - data rx callback
 * @pdev: txrx pdev handle
5092 * @buf_list: buffer list
Nirav Shah36a87bf2016-02-22 12:38:46 +05305093 * @staid: Station id
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005094 *
5095 * Return: None
5096 */
Nirav Shah36a87bf2016-02-22 12:38:46 +05305097static void ol_rx_data_cb(struct ol_txrx_pdev_t *pdev,
5098 qdf_nbuf_t buf_list, uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005099{
Mohit Khanna0696eef2016-04-14 16:14:08 -07005100 void *osif_dev;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005101 uint8_t drop_count = 0;
Nirav Shahcbc6d722016-03-01 16:24:53 +05305102 qdf_nbuf_t buf, next_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305103 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08005104 ol_txrx_rx_fp data_rx = NULL;
Nirav Shah36a87bf2016-02-22 12:38:46 +05305105 struct ol_txrx_peer_t *peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005106
Jeff Johnsondac9e382017-09-24 10:36:08 -07005107 if (qdf_unlikely(!pdev))
Nirav Shah36a87bf2016-02-22 12:38:46 +05305108 goto free_buf;
5109
5110 /* Do not use peer directly. Derive peer from staid to
5111 * make sure that peer is valid.
5112 */
Jingxiang Ge3badb982018-01-02 17:39:01 +08005113 peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev,
5114 staid, PEER_DEBUG_ID_OL_RX_THREAD);
Nirav Shah36a87bf2016-02-22 12:38:46 +05305115 if (!peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005116 goto free_buf;
5117
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305118 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre50141c52016-04-07 13:15:29 -07005119 if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
5120 !peer->vdev->rx)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305121 qdf_spin_unlock_bh(&peer->peer_info_lock);
Jingxiang Ge9f297062018-01-24 13:31:31 +08005122 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005123 goto free_buf;
5124 }
Dhanashri Atre182b0272016-02-17 15:35:07 -08005125
5126 data_rx = peer->vdev->rx;
Mohit Khanna0696eef2016-04-14 16:14:08 -07005127 osif_dev = peer->vdev->osif_dev;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305128 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005129
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005130 qdf_spin_lock_bh(&peer->bufq_info.bufq_lock);
5131 if (!list_empty(&peer->bufq_info.cached_bufq)) {
5132 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005133 /* Flush the cached frames to HDD before passing new rx frame */
5134 ol_txrx_flush_rx_frames(peer, 0);
5135 } else
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005136 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005137
Jingxiang Ge3badb982018-01-02 17:39:01 +08005138 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
5139
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005140 buf = buf_list;
5141 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05305142 next_buf = qdf_nbuf_queue_next(buf);
5143 qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
Mohit Khanna0696eef2016-04-14 16:14:08 -07005144 ret = data_rx(osif_dev, buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305145 if (ret != QDF_STATUS_SUCCESS) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305146 ol_txrx_err("Frame Rx to HDD failed");
Nirav Shah6a4eee62016-04-25 10:15:04 +05305147 if (pdev)
5148 TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05305149 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005150 }
5151 buf = next_buf;
5152 }
5153 return;
5154
5155free_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005156 drop_count = ol_txrx_drop_nbuf_list(buf_list);
        ol_txrx_warn("%s: Dropped frames %u", __func__, drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005158}
5159
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005160/* print for every 16th packet */
5161#define OL_TXRX_PRINT_RATE_LIMIT_THRESH 0x0f
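/*
 * 0x0f masks the low four bits of the call counter, so the
 * "data before peer registered" message below is emitted only on
 * every 16th invocation of ol_txrx_enqueue_rx_frames().
 */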
5162struct ol_rx_cached_buf *cache_buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305163
/**
 * ol_txrx_drop_frames() - helper to drop a list of cached rx frames
 * @bufqi: cached buffer queue info of the peer
 * @rx_buf_list: list of rx frames to drop
 *
 * Note: the caller must hold the cached bufq lock before invoking
 * this function. It also assumes that the pointers passed in are
 * valid (non-NULL).
 */
5169static inline void ol_txrx_drop_frames(
5170 struct ol_txrx_cached_bufq_t *bufqi,
5171 qdf_nbuf_t rx_buf_list)
5172{
5173 uint32_t dropped = ol_txrx_drop_nbuf_list(rx_buf_list);
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07005174
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305175 bufqi->dropped += dropped;
5176 bufqi->qdepth_no_thresh += dropped;
5177
5178 if (bufqi->qdepth_no_thresh > bufqi->high_water_mark)
5179 bufqi->high_water_mark = bufqi->qdepth_no_thresh;
5180}
5181
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005182static QDF_STATUS ol_txrx_enqueue_rx_frames(
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305183 struct ol_txrx_peer_t *peer,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005184 struct ol_txrx_cached_bufq_t *bufqi,
5185 qdf_nbuf_t rx_buf_list)
5186{
5187 struct ol_rx_cached_buf *cache_buf;
5188 qdf_nbuf_t buf, next_buf;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005189 static uint32_t count;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005190
5191 if ((count++ & OL_TXRX_PRINT_RATE_LIMIT_THRESH) == 0)
5192 ol_txrx_info_high(
5193 "Data on the peer before it is registered bufq->curr %d bufq->drops %d",
5194 bufqi->curr, bufqi->dropped);
5195
5196 qdf_spin_lock_bh(&bufqi->bufq_lock);
5197 if (bufqi->curr >= bufqi->thresh) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305198 ol_txrx_drop_frames(bufqi, rx_buf_list);
5199 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5200 return QDF_STATUS_E_FAULT;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005201 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005202 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5203
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005204 buf = rx_buf_list;
5205 while (buf) {
5206 next_buf = qdf_nbuf_queue_next(buf);
5207 cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
5208 if (!cache_buf) {
5209 ol_txrx_err(
5210 "Failed to allocate buf to cache the rx frames");
5211 qdf_nbuf_free(buf);
5212 } else {
5213 /* Add NULL terminator */
5214 qdf_nbuf_set_next(buf, NULL);
5215 cache_buf->buf = buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305216 if (peer && peer->valid) {
5217 qdf_spin_lock_bh(&bufqi->bufq_lock);
5218 list_add_tail(&cache_buf->list,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005219 &bufqi->cached_bufq);
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305220 bufqi->curr++;
5221 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5222 } else {
5223 qdf_mem_free(cache_buf);
5224 rx_buf_list = buf;
5225 qdf_nbuf_set_next(rx_buf_list, next_buf);
5226 qdf_spin_lock_bh(&bufqi->bufq_lock);
5227 ol_txrx_drop_frames(bufqi, rx_buf_list);
5228 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5229 return QDF_STATUS_E_FAULT;
5230 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005231 }
5232 buf = next_buf;
5233 }
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305234 return QDF_STATUS_SUCCESS;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005235}
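
/*
 * ol_txrx_enqueue_rx_frames() is called without the bufq lock held; it
 * takes bufqi->bufq_lock internally. Frames are cached only while the
 * peer is still valid and the queue depth is below bufqi->thresh;
 * otherwise the whole list is dropped and QDF_STATUS_E_FAULT is
 * returned to the caller.
 */
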
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005236/**
5237 * ol_rx_data_process() - process rx frame
5238 * @peer: peer
5239 * @rx_buf_list: rx buffer list
5240 *
5241 * Return: None
5242 */
5243void ol_rx_data_process(struct ol_txrx_peer_t *peer,
Nirav Shahcbc6d722016-03-01 16:24:53 +05305244 qdf_nbuf_t rx_buf_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005245{
        /*
         * The firmware data path active response arrives via the shim RX
         * thread T2H message, running in SIRQ (softirq) context; IPA
         * kernel module APIs must not be called from SIRQ context.
         */
Dhanashri Atre182b0272016-02-17 15:35:07 -08005251 ol_txrx_rx_fp data_rx = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05305252 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005253 uint8_t drop_count;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005254
5255 if ((!peer) || (!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305256 ol_txrx_err("peer/pdev is NULL");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005257 goto drop_rx_buf;
5258 }
5259
Dhanashri Atre182b0272016-02-17 15:35:07 -08005260 qdf_assert(peer->vdev);
5261
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305262 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005263 if (peer->state >= OL_TXRX_PEER_STATE_CONN)
Dhanashri Atre182b0272016-02-17 15:35:07 -08005264 data_rx = peer->vdev->rx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305265 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005266
5267 /*
5268 * If there is a data frame from peer before the peer is
5269 * registered for data service, enqueue them on to pending queue
5270 * which will be flushed to HDD once that station is registered.
5271 */
5272 if (!data_rx) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305273 if (ol_txrx_enqueue_rx_frames(peer, &peer->bufq_info,
5274 rx_buf_list)
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005275 != QDF_STATUS_SUCCESS)
Poddar, Siddarth07eebf32017-04-19 12:40:26 +05305276 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5277 "%s: failed to enqueue rx frm to cached_bufq",
5278 __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005279 } else {
5280#ifdef QCA_CONFIG_SMP
5281 /*
5282 * If the kernel is SMP, schedule rx thread to
5283 * better use multicores.
5284 */
5285 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
Nirav Shah36a87bf2016-02-22 12:38:46 +05305286 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005287 } else {
5288 p_cds_sched_context sched_ctx =
5289 get_cds_sched_ctxt();
5290 struct cds_ol_rx_pkt *pkt;
5291
5292 if (unlikely(!sched_ctx))
5293 goto drop_rx_buf;
5294
5295 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
5296 if (!pkt) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05305297 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305298 "No available Rx message buffer");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005299 goto drop_rx_buf;
5300 }
5301 pkt->callback = (cds_ol_rx_thread_cb)
5302 ol_rx_data_cb;
Nirav Shah36a87bf2016-02-22 12:38:46 +05305303 pkt->context = (void *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005304 pkt->Rxpkt = (void *)rx_buf_list;
5305 pkt->staId = peer->local_id;
5306 cds_indicate_rxpkt(sched_ctx, pkt);
5307 }
5308#else /* QCA_CONFIG_SMP */
Nirav Shah36a87bf2016-02-22 12:38:46 +05305309 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005310#endif /* QCA_CONFIG_SMP */
5311 }
5312
5313 return;
5314
5315drop_rx_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005316 drop_count = ol_txrx_drop_nbuf_list(rx_buf_list);
5317 ol_txrx_info_high("Dropped rx packets %u", drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005318}
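
/*
 * Rx delivery in ol_rx_data_process() takes one of three paths: frames
 * that arrive before the peer registers its rx callback are cached on
 * the peer's bufq; otherwise they are handed to ol_rx_data_cb() either
 * directly (rx thread disabled) or via a cds_ol_rx_pkt posted to the
 * rx thread when QCA_CONFIG_SMP and the rx thread are enabled.
 */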
5319
5320/**
5321 * ol_txrx_register_peer() - register peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005322 * @sta_desc: sta descriptor
5323 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05305324 * Return: QDF Status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005325 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005326static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005327{
5328 struct ol_txrx_peer_t *peer;
Anurag Chouhan6d760662016-02-20 16:05:43 +05305329 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005330 union ol_txrx_peer_update_param_t param;
5331 struct privacy_exemption privacy_filter;
5332
5333 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305334 ol_txrx_err("Pdev is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305335 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005336 }
5337
5338 if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305339 ol_txrx_err("Invalid sta id :%d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005340 sta_desc->sta_id);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305341 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005342 }
5343
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005344 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
5345 sta_desc->sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005346 if (!peer)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305347 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005348
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305349 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005350 peer->state = OL_TXRX_PEER_STATE_CONN;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305351 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005352
5353 param.qos_capable = sta_desc->is_qos_enabled;
5354 ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
5355 ol_txrx_peer_update_qos_capable);
5356
5357 if (sta_desc->is_wapi_supported) {
5358 /*Privacy filter to accept unencrypted WAI frames */
5359 privacy_filter.ether_type = ETHERTYPE_WAI;
5360 privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
5361 privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
5362 ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
5363 }
5364
5365 ol_txrx_flush_rx_frames(peer, 0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305366 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005367}
5368
5369/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005370 * ol_txrx_register_ocb_peer - Function to register the OCB peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005371 * @mac_addr: MAC address of the self peer
5372 * @peer_id: Pointer to the peer ID
5373 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305374 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005375 */
Jeff Johnson382bce02017-09-01 14:21:07 -07005376static QDF_STATUS ol_txrx_register_ocb_peer(uint8_t *mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005377 uint8_t *peer_id)
5378{
5379 ol_txrx_pdev_handle pdev;
5380 ol_txrx_peer_handle peer;
5381
Anurag Chouhan6d760662016-02-20 16:05:43 +05305382 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005383 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305384 ol_txrx_err("%s: Unable to find pdev!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005385 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305386 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005387 }
5388
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005389 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
5390 mac_addr, peer_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005391 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305392 ol_txrx_err("%s: Unable to find OCB peer!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005393 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305394 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005395 }
5396
5397 ol_txrx_set_ocb_peer(pdev, peer);
5398
5399 /* Set peer state to connected */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005400 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005401 OL_TXRX_PEER_STATE_AUTH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005402
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305403 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005404}
5405
5406/**
5407 * ol_txrx_set_ocb_peer - Function to store the OCB peer
5408 * @pdev: Handle to the HTT instance
5409 * @peer: Pointer to the peer
5410 */
5411void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
5412 struct ol_txrx_peer_t *peer)
5413{
5414 if (pdev == NULL)
5415 return;
5416
5417 pdev->ocb_peer = peer;
5418 pdev->ocb_peer_valid = (NULL != peer);
5419}
5420
5421/**
5422 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
5423 * @pdev: Handle to the HTT instance
5424 * @peer: Pointer to the returned peer
5425 *
5426 * Return: true if the peer is valid, false if not
5427 */
5428bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
5429 struct ol_txrx_peer_t **peer)
5430{
5431 int rc;
5432
5433 if ((pdev == NULL) || (peer == NULL)) {
5434 rc = false;
5435 goto exit;
5436 }
5437
5438 if (pdev->ocb_peer_valid) {
5439 *peer = pdev->ocb_peer;
5440 rc = true;
5441 } else {
5442 rc = false;
5443 }
5444
5445exit:
5446 return rc;
5447}
5448
5449#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5450/**
5451 * ol_txrx_register_pause_cb() - register pause callback
5452 * @pause_cb: pause callback
5453 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05305454 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005455 */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07005456static QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
5457 tx_pause_callback pause_cb)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005458{
Anurag Chouhan6d760662016-02-20 16:05:43 +05305459 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Yun Parkeaea8632017-04-09 09:53:45 -07005460
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005461 if (!pdev || !pause_cb) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305462 ol_txrx_err("pdev or pause_cb is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305463 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005464 }
5465 pdev->pause_cb = pause_cb;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305466 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005467}
5468#endif
5469
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005470#ifdef RECEIVE_OFFLOAD
5471/**
5472 * ol_txrx_offld_flush_handler() - offld flush handler
5473 * @context: dev handle
5474 * @rxpkt: rx data
5475 * @staid: station id
5476 *
5477 * This function handles an offld flush indication.
5478 * If the rx thread is enabled, it will be invoked by the rx
5479 * thread else it will be called in the tasklet context
5480 *
5481 * Return: none
5482 */
5483static void ol_txrx_offld_flush_handler(void *context,
5484 void *rxpkt,
5485 uint16_t staid)
5486{
5487 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5488
5489 if (qdf_unlikely(!pdev)) {
5490 ol_txrx_err("Invalid context");
5491 qdf_assert(0);
5492 return;
5493 }
5494
5495 if (pdev->offld_flush_cb)
5496 pdev->offld_flush_cb(context);
5497 else
5498 ol_txrx_err("offld_flush_cb NULL");
5499}
5500
5501/**
5502 * ol_txrx_offld_flush() - offld flush callback
5503 * @data: opaque data pointer
5504 *
5505 * This is the callback registered with CE to trigger
5506 * an offld flush
5507 *
5508 * Return: none
5509 */
5510static void ol_txrx_offld_flush(void *data)
5511{
5512 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
5513 struct cds_ol_rx_pkt *pkt;
5514 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5515
5516 if (qdf_unlikely(!sched_ctx))
5517 return;
5518
5519 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
5520 ol_txrx_offld_flush_handler(data, NULL, 0);
5521 } else {
5522 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
5523 if (qdf_unlikely(!pkt)) {
5524 ol_txrx_err("Not able to allocate context");
5525 return;
5526 }
5527
5528 pkt->callback = ol_txrx_offld_flush_handler;
5529 pkt->context = data;
5530 pkt->Rxpkt = NULL;
5531 pkt->staId = 0;
5532 cds_indicate_rxpkt(sched_ctx, pkt);
5533 }
5534}
5535
5536/**
5537 * ol_register_offld_flush_cb() - register the offld flush callback
 * @offld_flush_cb: flush callback function
5540 *
5541 * Store the offld flush callback provided and in turn
5542 * register OL's offld flush handler with CE
5543 *
5544 * Return: none
5545 */
5546static void ol_register_offld_flush_cb(void (offld_flush_cb)(void *))
5547{
5548 struct hif_opaque_softc *hif_device;
5549 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5550
5551 if (pdev == NULL) {
5552 ol_txrx_err("pdev NULL!");
5553 TXRX_ASSERT2(0);
5554 goto out;
5555 }
5556 if (pdev->offld_flush_cb != NULL) {
5557 ol_txrx_info("offld already initialised");
5558 if (pdev->offld_flush_cb != offld_flush_cb) {
                        ol_txrx_err(
                                "offld_flush_cb differs from the previously registered callback");
5561 TXRX_ASSERT2(0);
5562 goto out;
5563 }
5564 goto out;
5565 }
5566 pdev->offld_flush_cb = offld_flush_cb;
5567 hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5568
5569 if (qdf_unlikely(hif_device == NULL)) {
5570 ol_txrx_err("hif_device NULL!");
5571 qdf_assert(0);
5572 goto out;
5573 }
5574
5575 hif_offld_flush_cb_register(hif_device, ol_txrx_offld_flush);
5576
5577out:
5578 return;
5579}
5580
5581/**
5582 * ol_deregister_offld_flush_cb() - deregister the offld flush callback
5583 *
5584 * Remove the offld flush callback provided and in turn
5585 * deregister OL's offld flush handler with CE
5586 *
5587 * Return: none
5588 */
5589static void ol_deregister_offld_flush_cb(void)
5590{
5591 struct hif_opaque_softc *hif_device;
5592 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5593
5594 if (pdev == NULL) {
5595 ol_txrx_err("pdev NULL!");
5596 return;
5597 }
5598 hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5599
5600 if (qdf_unlikely(hif_device == NULL)) {
5601 ol_txrx_err("hif_device NULL!");
5602 qdf_assert(0);
5603 return;
5604 }
5605
5606 hif_offld_flush_cb_deregister(hif_device);
5607
5608 pdev->offld_flush_cb = NULL;
5609}
5610#endif /* RECEIVE_OFFLOAD */
5611
Poddar, Siddarth34872782017-08-10 14:08:51 +05305612/**
5613 * ol_register_data_stall_detect_cb() - register data stall callback
5614 * @data_stall_detect_callback: data stall callback function
 *
5617 * Return: QDF_STATUS Enumeration
5618 */
5619static QDF_STATUS ol_register_data_stall_detect_cb(
5620 data_stall_detect_cb data_stall_detect_callback)
5621{
5622 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5623
5624 if (pdev == NULL) {
5625 ol_txrx_err("%s: pdev NULL!", __func__);
5626 return QDF_STATUS_E_INVAL;
5627 }
5628 pdev->data_stall_detect_callback = data_stall_detect_callback;
5629 return QDF_STATUS_SUCCESS;
5630}
5631
5632/**
5633 * ol_deregister_data_stall_detect_cb() - de-register data stall callback
5634 * @data_stall_detect_callback: data stall callback function
 *
5637 * Return: QDF_STATUS Enumeration
5638 */
5639static QDF_STATUS ol_deregister_data_stall_detect_cb(
5640 data_stall_detect_cb data_stall_detect_callback)
5641{
5642 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5643
5644 if (pdev == NULL) {
5645 ol_txrx_err("%s: pdev NULL!", __func__);
5646 return QDF_STATUS_E_INVAL;
5647 }
5648 pdev->data_stall_detect_callback = NULL;
5649 return QDF_STATUS_SUCCESS;
5650}
5651
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305652/**
5653 * ol_txrx_post_data_stall_event() - post data stall event
5654 * @indicator: Module triggering data stall
5655 * @data_stall_type: data stall event type
5656 * @pdev_id: pdev id
5657 * @vdev_id_bitmap: vdev id bitmap
5658 * @recovery_type: data stall recovery type
5659 *
5660 * Return: None
5661 */
5662static void ol_txrx_post_data_stall_event(
5663 enum data_stall_log_event_indicator indicator,
5664 enum data_stall_log_event_type data_stall_type,
5665 uint32_t pdev_id, uint32_t vdev_id_bitmap,
5666 enum data_stall_log_recovery_type recovery_type)
5667{
5668 struct scheduler_msg msg = {0};
5669 QDF_STATUS status;
5670 struct data_stall_event_info *data_stall_info;
5671 ol_txrx_pdev_handle pdev;
5672
5673 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5674 if (!pdev) {
5675 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5676 "%s: pdev is NULL.", __func__);
5677 return;
5678 }
5679 data_stall_info = qdf_mem_malloc(sizeof(*data_stall_info));
5680 if (!data_stall_info) {
5681 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5682 "%s: data_stall_info is NULL.", __func__);
5683 return;
5684 }
5685 data_stall_info->indicator = indicator;
5686 data_stall_info->data_stall_type = data_stall_type;
5687 data_stall_info->vdev_id_bitmap = vdev_id_bitmap;
5688 data_stall_info->pdev_id = pdev_id;
5689 data_stall_info->recovery_type = recovery_type;
5690
Poddar, Siddarthb9047592017-10-05 15:48:28 +05305691 if (data_stall_info->data_stall_type ==
5692 DATA_STALL_LOG_FW_RX_REFILL_FAILED)
5693 htt_log_rx_ring_info(pdev->htt_pdev);
5694
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305695 sys_build_message_header(SYS_MSG_ID_DATA_STALL_MSG, &msg);
5696 /* Save callback and data */
5697 msg.callback = pdev->data_stall_detect_callback;
5698 msg.bodyptr = data_stall_info;
5699 msg.bodyval = 0;
5700
5701 status = scheduler_post_msg(QDF_MODULE_ID_SYS, &msg);
5702
5703 if (status != QDF_STATUS_SUCCESS) {
5704 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5705 "%s: failed to post data stall msg to SYS", __func__);
5706 qdf_mem_free(data_stall_info);
5707 }
5708}
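
/*
 * Note on ownership: on a successful scheduler_post_msg() the
 * data_stall_info buffer travels with the SYS message and is presumably
 * freed by the message consumer; it is freed here only when the post
 * fails.
 */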
5709
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305710void
5711ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
5712{
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07005713 qdf_print("%s: Pkt: VA 0x%pK PA 0x%llx len %d\n", __func__,
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305714 qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
5715 print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
5716 qdf_nbuf_data(nbuf), len, true);
5717}
5718
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305719#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5720bool
5721ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
5722{
Yun Park63661012018-01-04 15:04:22 -08005723 struct ol_tx_flow_pool_t *pool;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305724 bool enough_desc_flag;
5725
5726 if (!vdev)
Yun Parkff5da562017-01-18 14:44:20 -08005727 return false;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305728
5729 pool = vdev->pool;
5730
Yun Parkff5da562017-01-18 14:44:20 -08005731 if (!pool)
5732 return false;
5733
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305734 qdf_spin_lock_bh(&pool->flow_pool_lock);
5735 enough_desc_flag = (pool->avail_desc < (pool->stop_th +
Yun Parkff5da562017-01-18 14:44:20 -08005736 OL_TX_NON_FWD_RESERVE))
5737 ? false : true;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305738 qdf_spin_unlock_bh(&pool->flow_pool_lock);
5739 return enough_desc_flag;
5740}
5741#else
5742bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
5743{
5744 return true;
5745}
5746#endif
5747
Dhanashri Atre12a08392016-02-17 13:10:34 -08005748/**
5749 * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
5750 * @vdev_id: vdev_id
5751 *
5752 * Return: vdev handle
5753 * NULL if not found.
5754 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005755struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
Dhanashri Atre12a08392016-02-17 13:10:34 -08005756{
5757 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5758 ol_txrx_vdev_handle vdev = NULL;
5759
5760 if (qdf_unlikely(!pdev))
5761 return NULL;
5762
5763 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5764 if (vdev->vdev_id == vdev_id)
5765 break;
5766 }
5767
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005768 return (struct cdp_vdev *)vdev;
Dhanashri Atre12a08392016-02-17 13:10:34 -08005769}
Nirav Shah2e583a02016-04-30 14:06:12 +05305770
5771/**
5772 * ol_txrx_set_wisa_mode() - set wisa mode
5773 * @vdev: vdev handle
5774 * @enable: enable flag
5775 *
5776 * Return: QDF STATUS
5777 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005778static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_vdev *pvdev, bool enable)
Nirav Shah2e583a02016-04-30 14:06:12 +05305779{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005780 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Leo Chang98726762016-10-28 11:07:18 -07005781
Nirav Shah2e583a02016-04-30 14:06:12 +05305782 if (!vdev)
5783 return QDF_STATUS_E_INVAL;
5784
5785 vdev->is_wisa_mode_enable = enable;
5786 return QDF_STATUS_SUCCESS;
5787}
Leo Chang98726762016-10-28 11:07:18 -07005788
5789/**
5790 * ol_txrx_get_vdev_id() - get interface id from interface context
5791 * @pvdev: vdev handle
5792 *
5793 * Return: virtual interface id
5794 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005795static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
Leo Chang98726762016-10-28 11:07:18 -07005796{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005797 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07005798
Leo Chang98726762016-10-28 11:07:18 -07005799 return vdev->vdev_id;
5800}
5801
5802/**
5803 * ol_txrx_last_assoc_received() - get time of last assoc received
5804 * @ppeer: peer handle
5805 *
5806 * Return: pointer of the time of last assoc received
5807 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005808static qdf_time_t *ol_txrx_last_assoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005809{
5810 ol_txrx_peer_handle peer = ppeer;
5811
5812 return &peer->last_assoc_rcvd;
5813}
5814
5815/**
5816 * ol_txrx_last_disassoc_received() - get time of last disassoc received
5817 * @ppeer: peer handle
5818 *
5819 * Return: pointer of the time of last disassoc received
5820 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005821static qdf_time_t *ol_txrx_last_disassoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005822{
5823 ol_txrx_peer_handle peer = ppeer;
5824
5825 return &peer->last_disassoc_rcvd;
5826}
5827
5828/**
5829 * ol_txrx_last_deauth_received() - get time of last deauth received
5830 * @ppeer: peer handle
5831 *
5832 * Return: pointer of the time of last deauth received
5833 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005834static qdf_time_t *ol_txrx_last_deauth_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005835{
5836 ol_txrx_peer_handle peer = ppeer;
5837
5838 return &peer->last_deauth_rcvd;
5839}
5840
5841/**
5842 * ol_txrx_soc_attach_target() - attach soc target
5843 * @soc: soc handle
5844 *
 * MCL legacy OL does nothing here
5846 *
5847 * Return: 0
5848 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005849static int ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
Leo Chang98726762016-10-28 11:07:18 -07005850{
5851 /* MCL legacy OL do nothing here */
5852 return 0;
5853}
5854
5855/**
5856 * ol_txrx_soc_detach() - detach soc target
5857 * @soc: soc handle
5858 *
 * Frees the soc handle allocated in ol_txrx_soc_attach().
 *
 * Return: none
5862 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005863static void ol_txrx_soc_detach(void *soc)
Leo Chang98726762016-10-28 11:07:18 -07005864{
Venkata Sharath Chandra Manchala0c2eece2017-03-09 17:30:52 -08005865 qdf_mem_free(soc);
Leo Chang98726762016-10-28 11:07:18 -07005866}
5867
5868/**
5869 * ol_txrx_pkt_log_con_service() - connect packet log service
5870 * @ppdev: physical device handle
5871 * @scn: device context
5872 *
 * Return: none
5874 */
Nirav Shahbb8e47c2018-05-17 16:56:41 +05305875#ifdef REMOVE_PKT_LOG
5876static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
5877{
5878}
5879#else
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005880static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
Leo Chang98726762016-10-28 11:07:18 -07005881{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005882 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07005883
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005884 htt_pkt_log_init((struct cdp_pdev *)pdev, scn);
Leo Chang98726762016-10-28 11:07:18 -07005885 pktlog_htc_attach();
5886}
Nirav Shahbb8e47c2018-05-17 16:56:41 +05305887#endif
Leo Chang98726762016-10-28 11:07:18 -07005888
5889/* OL wrapper functions for CDP abstraction */
5890/**
5891 * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
5892 * @peer: peer handle
5893 * @drop: rx packets drop or deliver
5894 *
5895 * Return: none
5896 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005897static void ol_txrx_wrapper_flush_rx_frames(void *peer, bool drop)
Leo Chang98726762016-10-28 11:07:18 -07005898{
5899 ol_txrx_flush_rx_frames((ol_txrx_peer_handle)peer, drop);
5900}
5901
5902/**
5903 * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
5904 * @ppdev: pdev handle
5905 * @vdev_id: interface id
5906 *
5907 * Return: virtual interface instance
5908 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005909static
5910struct cdp_vdev *ol_txrx_wrapper_get_vdev_from_vdev_id(struct cdp_pdev *ppdev,
5911 uint8_t vdev_id)
Leo Chang98726762016-10-28 11:07:18 -07005912{
5913 return ol_txrx_get_vdev_from_vdev_id(vdev_id);
5914}
5915
5916/**
5917 * ol_txrx_wrapper_register_peer() - register peer
5918 * @pdev: pdev handle
5919 * @sta_desc: peer description
5920 *
5921 * Return: QDF STATUS
5922 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005923static QDF_STATUS ol_txrx_wrapper_register_peer(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005924 struct ol_txrx_desc_type *sta_desc)
5925{
5926 return ol_txrx_register_peer(sta_desc);
5927}
5928
5929/**
5930 * ol_txrx_wrapper_peer_find_by_local_id() - Find a txrx peer handle
5931 * @pdev - the data physical device object
5932 * @local_peer_id - the ID txrx assigned locally to the peer in question
5933 *
5934 * The control SW typically uses the txrx peer handle to refer to the peer.
 * In unusual circumstances, if it is infeasible for the control SW to
 * maintain the txrx peer handle but it can maintain a small integer
 * local peer ID, this function allows the peer handle to be retrieved,
 * based on the local peer ID.
5939 *
5940 * @return handle to the txrx peer object
5941 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005942static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005943ol_txrx_wrapper_peer_find_by_local_id(struct cdp_pdev *pdev,
5944 uint8_t local_peer_id)
Leo Chang98726762016-10-28 11:07:18 -07005945{
5946 return (void *)ol_txrx_peer_find_by_local_id(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005947 pdev, local_peer_id);
Leo Chang98726762016-10-28 11:07:18 -07005948}
5949
5950/**
5951 * ol_txrx_wrapper_cfg_is_high_latency() - device is high or low latency device
5952 * @pdev: pdev handle
5953 *
5954 * Return: 1 high latency bus
5955 * 0 low latency bus
5956 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005957static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
Leo Chang98726762016-10-28 11:07:18 -07005958{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005959 return ol_cfg_is_high_latency(cfg_pdev);
Leo Chang98726762016-10-28 11:07:18 -07005960}
5961
5962/**
5963 * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
5964 * @data_peer - which peer has changed its state
5965 * @state - the new state of the peer
5966 *
5967 * Specify the peer's authentication state (none, connected, authenticated)
5968 * to allow the data SW to determine whether to filter out invalid data frames.
5969 * (In the "connected" state, where security is enabled, but authentication
5970 * has not completed, tx and rx data frames other than EAPOL or WAPI should
5971 * be discarded.)
5972 * This function is only relevant for systems in which the tx and rx filtering
5973 * are done in the host rather than in the target.
5974 *
5975 * Return: QDF Status
5976 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005977static QDF_STATUS ol_txrx_wrapper_peer_state_update(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005978 uint8_t *peer_mac, enum ol_txrx_peer_state state)
5979{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005980 return ol_txrx_peer_state_update(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005981 peer_mac, state);
5982}
5983
5984/**
5985 * ol_txrx_wrapper_find_peer_by_addr() - find peer instance by address
5986 * @pdev: pdev handle
 * @peer_addr: peer address we want to find
Leo Chang98726762016-10-28 11:07:18 -07005988 * @peer_id: peer id
5989 *
5990 * Return: peer instance pointer
5991 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005992static void *ol_txrx_wrapper_find_peer_by_addr(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005993 uint8_t *peer_addr, uint8_t *peer_id)
5994{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005995 return ol_txrx_find_peer_by_addr(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005996 peer_addr, peer_id);
5997}
5998
5999/**
Mohit Khannab7bec722017-11-10 11:43:44 -08006000 * ol_txrx_wrapper_peer_get_ref_by_addr() - get peer reference by address
6001 * @pdev: pdev handle
6002 * @peer_addr: peer address we want to find
6003 * @peer_id: peer id
6004 * @debug_id: peer debug id for tracking
6005 *
6006 * Return: peer instance pointer
6007 */
6008static void *
6009ol_txrx_wrapper_peer_get_ref_by_addr(struct cdp_pdev *pdev,
6010 u8 *peer_addr, uint8_t *peer_id,
6011 enum peer_debug_id_type debug_id)
6012{
6013 return ol_txrx_peer_get_ref_by_addr((ol_txrx_pdev_handle)pdev,
6014 peer_addr, peer_id, debug_id);
6015}
6016
6017/**
6018 * ol_txrx_wrapper_peer_release_ref() - release peer reference
6019 * @peer: peer handle
6020 * @debug_id: peer debug id for tracking
6021 *
6022 * Release peer ref acquired by peer get ref api
6023 *
6024 * Return: void
6025 */
6026static void ol_txrx_wrapper_peer_release_ref(void *peer,
6027 enum peer_debug_id_type debug_id)
6028{
6029 ol_txrx_peer_release_ref(peer, debug_id);
6030}
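
/*
 * Every successful ol_txrx_wrapper_peer_get_ref_by_addr() call must be
 * balanced by ol_txrx_wrapper_peer_release_ref() with the same
 * peer_debug_id_type, mirroring the underlying
 * ol_txrx_peer_get_ref_by_addr()/ol_txrx_peer_release_ref() pair.
 */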
6031
6032/**
Leo Chang98726762016-10-28 11:07:18 -07006033 * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
6034 * @cfg_ctx: cfg context
6035 * @cfg_param: cfg parameters
6036 *
6037 * Return: none
6038 */
Jeff Johnsonffa9afc2016-12-19 15:34:41 -08006039static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08006040ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
6041 void *cfg_param)
Leo Chang98726762016-10-28 11:07:18 -07006042{
6043 return ol_tx_set_flow_control_parameters(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08006044 cfg_pdev,
Leo Chang98726762016-10-28 11:07:18 -07006045 (struct txrx_pdev_cfg_param_t *)cfg_param);
6046}
6047
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08006048#ifdef WDI_EVENT_ENABLE
6049void *ol_get_pldev(struct cdp_pdev *txrx_pdev)
6050{
6051 struct ol_txrx_pdev_t *pdev =
6052 (struct ol_txrx_pdev_t *)txrx_pdev;
6053 if (pdev != NULL)
6054 return pdev->pl_dev;
6055
6056 return NULL;
6057}
6058#endif
6059
Leo Chang98726762016-10-28 11:07:18 -07006060static struct cdp_cmn_ops ol_ops_cmn = {
6061 .txrx_soc_attach_target = ol_txrx_soc_attach_target,
6062 .txrx_vdev_attach = ol_txrx_vdev_attach,
6063 .txrx_vdev_detach = ol_txrx_vdev_detach,
6064 .txrx_pdev_attach = ol_txrx_pdev_attach,
6065 .txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
6066 .txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05306067 .txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
Leo Chang98726762016-10-28 11:07:18 -07006068 .txrx_pdev_detach = ol_txrx_pdev_detach,
Dhanashri Atre272fd232016-11-10 16:20:46 -08006069 .txrx_peer_create = ol_txrx_peer_attach,
6070 .txrx_peer_setup = NULL,
6071 .txrx_peer_teardown = NULL,
6072 .txrx_peer_delete = ol_txrx_peer_detach,
Leo Chang98726762016-10-28 11:07:18 -07006073 .txrx_vdev_register = ol_txrx_vdev_register,
6074 .txrx_soc_detach = ol_txrx_soc_detach,
6075 .txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
6076 .txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
6077 .txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
Krishna Kumaar Natarajan5fb9ac12016-12-06 14:28:35 -08006078 .txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
Leo Chang98726762016-10-28 11:07:18 -07006079 .txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
6080 .txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
6081 .txrx_get_tx_pending = ol_txrx_get_tx_pending,
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07006082 .flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07006083 .txrx_fw_stats_get = ol_txrx_fw_stats_get,
6084 .display_stats = ol_txrx_display_stats,
Leo Chang98726762016-10-28 11:07:18 -07006085 /* TODO: Add other functions */
6086};
6087
6088static struct cdp_misc_ops ol_ops_misc = {
6089 .set_ibss_vdev_heart_beat_timer =
6090 ol_txrx_set_ibss_vdev_heart_beat_timer,
6091#ifdef CONFIG_HL_SUPPORT
6092 .set_wmm_param = ol_txrx_set_wmm_param,
6093#endif /* CONFIG_HL_SUPPORT */
6094 .bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
6095 .bad_peer_txctl_update_threshold =
6096 ol_txrx_bad_peer_txctl_update_threshold,
6097 .hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
6098 .tx_non_std = ol_tx_non_std,
6099 .get_vdev_id = ol_txrx_get_vdev_id,
Alok Kumar75355aa2018-03-19 17:32:58 +05306100 .get_tx_ack_stats = ol_txrx_get_tx_ack_stats,
Leo Chang98726762016-10-28 11:07:18 -07006101 .set_wisa_mode = ol_txrx_set_wisa_mode,
Poddar, Siddarth34872782017-08-10 14:08:51 +05306102 .txrx_data_stall_cb_register = ol_register_data_stall_detect_cb,
6103 .txrx_data_stall_cb_deregister = ol_deregister_data_stall_detect_cb,
Poddar, Siddarthdb568162017-07-27 18:16:38 +05306104 .txrx_post_data_stall_event = ol_txrx_post_data_stall_event,
Leo Chang98726762016-10-28 11:07:18 -07006105#ifdef FEATURE_RUNTIME_PM
6106 .runtime_suspend = ol_txrx_runtime_suspend,
6107 .runtime_resume = ol_txrx_runtime_resume,
6108#endif /* FEATURE_RUNTIME_PM */
6109 .get_opmode = ol_txrx_get_opmode,
6110 .mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
6111 .update_mac_id = ol_txrx_update_mac_id,
6112 .flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
6113 .get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
6114 .pkt_log_init = htt_pkt_log_init,
6115 .pkt_log_con_service = ol_txrx_pkt_log_con_service
6116};
6117
6118static struct cdp_flowctl_ops ol_ops_flowctl = {
6119#ifdef QCA_LL_TX_FLOW_CONTROL_V2
6120 .register_pause_cb = ol_txrx_register_pause_cb,
6121 .set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07006122 .dump_flow_pool_info = ol_tx_dump_flow_pool_info,
Leo Chang98726762016-10-28 11:07:18 -07006123#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
6124};
6125
6126static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
6127#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
6128 .register_tx_flow_control = ol_txrx_register_tx_flow_control,
6129 .deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
6130 .flow_control_cb = ol_txrx_flow_control_cb,
6131 .get_tx_resource = ol_txrx_get_tx_resource,
6132 .ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
6133 .vdev_flush = ol_txrx_vdev_flush,
6134 .vdev_pause = ol_txrx_vdev_pause,
6135 .vdev_unpause = ol_txrx_vdev_unpause
6136#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
6137};
6138
Leo Chang98726762016-10-28 11:07:18 -07006139#ifdef IPA_OFFLOAD
Yun Parkb4f591d2017-03-29 15:51:01 -07006140static struct cdp_ipa_ops ol_ops_ipa = {
Leo Chang98726762016-10-28 11:07:18 -07006141 .ipa_get_resource = ol_txrx_ipa_uc_get_resource,
6142 .ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
6143 .ipa_set_active = ol_txrx_ipa_uc_set_active,
6144 .ipa_op_response = ol_txrx_ipa_uc_op_response,
6145 .ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
6146 .ipa_get_stat = ol_txrx_ipa_uc_get_stat,
6147 .ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
Yun Park637d6482016-10-05 10:51:33 -07006148 .ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base,
Yun Parkb4f591d2017-03-29 15:51:01 -07006149 .ipa_enable_autonomy = ol_txrx_ipa_enable_autonomy,
6150 .ipa_disable_autonomy = ol_txrx_ipa_disable_autonomy,
6151 .ipa_setup = ol_txrx_ipa_setup,
6152 .ipa_cleanup = ol_txrx_ipa_cleanup,
6153 .ipa_setup_iface = ol_txrx_ipa_setup_iface,
6154 .ipa_cleanup_iface = ol_txrx_ipa_cleanup_iface,
6155 .ipa_enable_pipes = ol_txrx_ipa_enable_pipes,
6156 .ipa_disable_pipes = ol_txrx_ipa_disable_pipes,
6157 .ipa_set_perf_level = ol_txrx_ipa_set_perf_level,
6158#ifdef FEATURE_METERING
Yun Park637d6482016-10-05 10:51:33 -07006159 .ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
6160 .ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota
Yun Parkb4f591d2017-03-29 15:51:01 -07006161#endif
Leo Chang98726762016-10-28 11:07:18 -07006162};
Yun Parkb4f591d2017-03-29 15:51:01 -07006163#endif
Leo Chang98726762016-10-28 11:07:18 -07006164
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07006165#ifdef RECEIVE_OFFLOAD
6166static struct cdp_rx_offld_ops ol_rx_offld_ops = {
6167 .register_rx_offld_flush_cb = ol_register_offld_flush_cb,
6168 .deregister_rx_offld_flush_cb = ol_deregister_offld_flush_cb
6169};
6170#endif
6171
Leo Chang98726762016-10-28 11:07:18 -07006172static struct cdp_bus_ops ol_ops_bus = {
6173 .bus_suspend = ol_txrx_bus_suspend,
6174 .bus_resume = ol_txrx_bus_resume
6175};
6176
6177static struct cdp_ocb_ops ol_ops_ocb = {
6178 .set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
6179 .get_ocb_chan_info = ol_txrx_get_ocb_chan_info
6180};
6181
6182static struct cdp_throttle_ops ol_ops_throttle = {
Jeff Johnsonb13a5012016-12-21 08:41:16 -08006183#ifdef QCA_SUPPORT_TX_THROTTLE
Leo Chang98726762016-10-28 11:07:18 -07006184 .throttle_init_period = ol_tx_throttle_init_period,
6185 .throttle_set_level = ol_tx_throttle_set_level
Jeff Johnsonb13a5012016-12-21 08:41:16 -08006186#endif /* QCA_SUPPORT_TX_THROTTLE */
Leo Chang98726762016-10-28 11:07:18 -07006187};
6188
6189static struct cdp_mob_stats_ops ol_ops_mob_stats = {
Leo Chang98726762016-10-28 11:07:18 -07006190 .clear_stats = ol_txrx_clear_stats,
6191 .stats = ol_txrx_stats
6192};
6193
6194static struct cdp_cfg_ops ol_ops_cfg = {
6195 .set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
6196 .set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
6197 .cfg_attach = ol_pdev_cfg_attach,
6198 .vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
6199 .is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
6200 .tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
6201 .is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
6202 .set_flow_control_parameters =
6203 ol_txrx_wrapper_set_flow_control_parameters,
6204 .set_flow_steering = ol_set_cfg_flow_steering,
Yu Wang66a250b2017-07-19 11:46:40 +08006205 .set_ptp_rx_opt_enabled = ol_set_cfg_ptp_rx_opt_enabled,
Leo Chang98726762016-10-28 11:07:18 -07006206};
6207
6208static struct cdp_peer_ops ol_ops_peer = {
6209 .register_peer = ol_txrx_wrapper_register_peer,
6210 .clear_peer = ol_txrx_clear_peer,
Mohit Khannab7bec722017-11-10 11:43:44 -08006211 .peer_get_ref_by_addr = ol_txrx_wrapper_peer_get_ref_by_addr,
6212 .peer_release_ref = ol_txrx_wrapper_peer_release_ref,
Leo Chang98726762016-10-28 11:07:18 -07006213 .find_peer_by_addr = ol_txrx_wrapper_find_peer_by_addr,
6214 .find_peer_by_addr_and_vdev = ol_txrx_find_peer_by_addr_and_vdev,
6215 .local_peer_id = ol_txrx_local_peer_id,
6216 .peer_find_by_local_id = ol_txrx_wrapper_peer_find_by_local_id,
6217 .peer_state_update = ol_txrx_wrapper_peer_state_update,
6218 .get_vdevid = ol_txrx_get_vdevid,
6219 .get_vdev_by_sta_id = ol_txrx_get_vdev_by_sta_id,
6220 .register_ocb_peer = ol_txrx_register_ocb_peer,
6221 .peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
6222 .get_peer_state = ol_txrx_get_peer_state,
6223 .get_vdev_for_peer = ol_txrx_get_vdev_for_peer,
6224 .update_ibss_add_peer_num_of_vdev =
6225 ol_txrx_update_ibss_add_peer_num_of_vdev,
6226 .remove_peers_for_vdev = ol_txrx_remove_peers_for_vdev,
6227 .remove_peers_for_vdev_no_lock = ol_txrx_remove_peers_for_vdev_no_lock,
Yu Wang053d3e72017-02-08 18:48:24 +08006228#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
Leo Chang98726762016-10-28 11:07:18 -07006229 .copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
6230 .add_last_real_peer = ol_txrx_add_last_real_peer,
Jeff Johnson2338e1a2016-12-16 15:59:24 -08006231 .is_vdev_restore_last_peer = is_vdev_restore_last_peer,
6232 .update_last_real_peer = ol_txrx_update_last_real_peer,
6233#endif /* CONFIG_HL_SUPPORT */
Leo Chang98726762016-10-28 11:07:18 -07006234 .last_assoc_received = ol_txrx_last_assoc_received,
6235 .last_disassoc_received = ol_txrx_last_disassoc_received,
6236 .last_deauth_received = ol_txrx_last_deauth_received,
Leo Chang98726762016-10-28 11:07:18 -07006237 .peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
6238};
6239
6240static struct cdp_tx_delay_ops ol_ops_delay = {
6241#ifdef QCA_COMPUTE_TX_DELAY
6242 .tx_delay = ol_tx_delay,
6243 .tx_delay_hist = ol_tx_delay_hist,
6244 .tx_packet_count = ol_tx_packet_count,
6245 .tx_set_compute_interval = ol_tx_set_compute_interval
6246#endif /* QCA_COMPUTE_TX_DELAY */
6247};
6248
6249static struct cdp_pmf_ops ol_ops_pmf = {
6250 .get_pn_info = ol_txrx_get_pn_info
6251};
6252
Leo Chang98726762016-10-28 11:07:18 -07006253static struct cdp_ctrl_ops ol_ops_ctrl = {
Hanumanth Reddy Pothula855f7ef2018-02-13 18:32:05 +05306254 .txrx_get_pldev = ol_get_pldev,
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08006255 .txrx_wdi_event_sub = wdi_event_sub,
6256 .txrx_wdi_event_unsub = wdi_event_unsub,
Leo Chang98726762016-10-28 11:07:18 -07006257};
6258
/* WIN platform specific structures */
Leo Chang98726762016-10-28 11:07:18 -07006260static struct cdp_me_ops ol_ops_me = {
6261 /* EMPTY FOR MCL */
6262};
6263
6264static struct cdp_mon_ops ol_ops_mon = {
6265 /* EMPTY FOR MCL */
6266};
6267
6268static struct cdp_host_stats_ops ol_ops_host_stats = {
6269 /* EMPTY FOR MCL */
6270};
6271
6272static struct cdp_wds_ops ol_ops_wds = {
6273 /* EMPTY FOR MCL */
6274};
6275
6276static struct cdp_raw_ops ol_ops_raw = {
6277 /* EMPTY FOR MCL */
6278};
6279
6280static struct cdp_ops ol_txrx_ops = {
6281 .cmn_drv_ops = &ol_ops_cmn,
6282 .ctrl_ops = &ol_ops_ctrl,
6283 .me_ops = &ol_ops_me,
6284 .mon_ops = &ol_ops_mon,
6285 .host_stats_ops = &ol_ops_host_stats,
6286 .wds_ops = &ol_ops_wds,
6287 .raw_ops = &ol_ops_raw,
6288 .misc_ops = &ol_ops_misc,
6289 .cfg_ops = &ol_ops_cfg,
6290 .flowctl_ops = &ol_ops_flowctl,
6291 .l_flowctl_ops = &ol_ops_l_flowctl,
Yun Parkb4f591d2017-03-29 15:51:01 -07006292#ifdef IPA_OFFLOAD
Leo Chang98726762016-10-28 11:07:18 -07006293 .ipa_ops = &ol_ops_ipa,
Yun Parkb4f591d2017-03-29 15:51:01 -07006294#endif
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07006295#ifdef RECEIVE_OFFLOAD
6296 .rx_offld_ops = &ol_rx_offld_ops,
6297#endif
Leo Chang98726762016-10-28 11:07:18 -07006298 .bus_ops = &ol_ops_bus,
6299 .ocb_ops = &ol_ops_ocb,
6300 .peer_ops = &ol_ops_peer,
6301 .throttle_ops = &ol_ops_throttle,
6302 .mob_stats_ops = &ol_ops_mob_stats,
6303 .delay_ops = &ol_ops_delay,
6304 .pmf_ops = &ol_ops_pmf
6305};
6306
Jeff Johnson02c37b42017-01-10 14:49:24 -08006307/*
6308 * Local prototype added to temporarily address warning caused by
6309 * -Wmissing-prototypes. A more correct solution, namely to expose
6310 * a prototype in an appropriate header file, will come later.
6311 */
6312struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
6313 struct ol_if_ops *dp_ol_if_ops);
6314struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
6315 struct ol_if_ops *dp_ol_if_ops)
Leo Chang98726762016-10-28 11:07:18 -07006316{
6317 struct cdp_soc_t *soc = qdf_mem_malloc(sizeof(struct cdp_soc_t));
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07006318
Leo Chang98726762016-10-28 11:07:18 -07006319 if (!soc) {
6320 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6321 "%s: OL SOC memory allocation failed\n", __func__);
6322 return NULL;
6323 }
6324
6325 soc->ops = &ol_txrx_ops;
6326 return soc;
6327}
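
/*
 * Minimal usage sketch (illustrative only; the caller's scn and
 * dp_ol_if_ops handles and the verbosity enum value are assumptions
 * about the platform glue, not part of this file): the platform layer
 * allocates the soc with ol_txrx_soc_attach() and then invokes datapath
 * operations through the returned ops table rather than calling the
 * ol_* functions directly, e.g.:
 *
 *      struct cdp_soc_t *soc = ol_txrx_soc_attach(scn, dp_ol_if_ops);
 *
 *      if (soc) {
 *              soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
 *              soc->ops->cmn_drv_ops->display_stats(soc,
 *                              CDP_TXRX_PATH_STATS,
 *                              QDF_STATS_VERBOSITY_LEVEL_LOW);
 *      }
 */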
6328
6329