/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>              /* uint32_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc,free */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <qdf_debugfs.h>

/* header files for utilities */
#include <cds_queue.h>          /* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_if_athvar.h>

/* header files for HTT API */
#include <ol_htt_api.h>
#include <ol_htt_tx_api.h>

/* header files for our own APIs */
#include <ol_txrx_api.h>
#include <ol_txrx_dbg.h>
#include <cdp_txrx_ocb.h>
#include <ol_txrx_ctrl_api.h>
#include <cdp_txrx_stats.h>
#include <ol_txrx_osif_api.h>
/* header files for our internal definitions */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT, etc. */
#include <wdi_event.h>          /* WDI events */
#include <ol_tx.h>              /* ol_tx_ll */
#include <ol_rx.h>              /* ol_rx_deliver */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
#include <ol_rx_pn.h>           /* ol_rx_pn_check, etc. */
#include <ol_rx_fwd.h>          /* ol_rx_fwd_check, etc. */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
#include <ol_rx_reorder.h>
#include <ol_tx_send.h>         /* ol_tx_discard_target_frms */
#include <ol_tx_desc.h>         /* ol_tx_desc_frame_free */
#include <ol_tx_queue.h>
#include <ol_tx_sched.h>        /* ol_tx_sched_attach, etc. */
#include <ol_txrx.h>
#include <ol_txrx_types.h>
#include <cdp_txrx_flow_ctrl_legacy.h>
#include <cdp_txrx_bus.h>
#include <cdp_txrx_ipa.h>
#include <cdp_txrx_pmf.h>
#include "wma.h"
#include "hif.h"
#include "hif_main.h"
#include <cdp_txrx_peer_ops.h>
#ifndef REMOVE_PKT_LOG
#include "pktlog_ac.h"
#endif
#include <wlan_policy_mgr_api.h>
#include "epping_main.h"
#include <a_types.h>
#include <cdp_txrx_handle.h>
#include "wlan_qct_sys.h"

#include <htt_internal.h>
#include <ol_txrx_ipa.h>
#include "wlan_roam_debug.h"

#define DPT_DEBUGFS_PERMS (QDF_FILE_USR_READ | \
                           QDF_FILE_USR_WRITE | \
                           QDF_FILE_GRP_READ | \
                           QDF_FILE_OTH_READ)

QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
                                     uint8_t *peer_mac,
                                     enum ol_txrx_peer_state state);
static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *vdev,
                                        bool val);
int ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
extern void
ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
                      struct ol_tx_wmm_param_t wmm_param);

extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
                                uint64_t **last_pn, uint32_t **rmf_pn_replays);

/* thresh for peer's cached buf queue beyond which the elements are dropped */
#define OL_TXRX_CACHED_BUFQ_THRESH 128

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
 * @pvdev: the data virtual device
 * @bss_addr: bss address
 *
 * Return: None
 */
static void
ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
{
        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

        qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
        if (bss_addr && vdev->last_real_peer &&
            !qdf_mem_cmp((u8 *)bss_addr,
                         vdev->last_real_peer->mac_addr.raw,
                         IEEE80211_ADDR_LEN))
                qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
                             vdev->last_real_peer->mac_addr.raw,
                             OL_TXRX_MAC_ADDR_LEN);
        qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
}

/**
 * ol_txrx_add_last_real_peer() - add the last real peer
 * @ppdev: the data physical device
 * @pvdev: virtual device
 * @peer_id: peer id
 *
 * Return: None
 */
static void
ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
                           struct cdp_vdev *pvdev, uint8_t *peer_id)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
        ol_txrx_peer_handle peer;

        peer = ol_txrx_find_peer_by_addr(
                (struct cdp_pdev *)pdev,
                vdev->hl_tdls_ap_mac_addr.raw,
                peer_id);

        qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
        if (!vdev->last_real_peer && peer &&
            (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
                vdev->last_real_peer = peer;
        qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
}

/**
 * is_vdev_restore_last_peer() - check for vdev last peer
 * @ppeer: peer object
 *
 * Return: true if the peer is the vdev's last real peer
 */
static bool
is_vdev_restore_last_peer(void *ppeer)
{
        struct ol_txrx_peer_t *peer = ppeer;
        struct ol_txrx_vdev_t *vdev;

        vdev = peer->vdev;
        return vdev->last_real_peer && (vdev->last_real_peer == peer);
}

/**
 * ol_txrx_update_last_real_peer() - update the vdev's last real peer
 * @ppdev: the data physical device
 * @ppeer: peer device
 * @peer_id: peer id
 * @restore_last_peer: restore last peer flag
 *
 * Return: None
 */
static void
ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *ppeer,
                              uint8_t *peer_id, bool restore_last_peer)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
        struct ol_txrx_peer_t *peer = ppeer;
        struct ol_txrx_vdev_t *vdev;

        if (!restore_last_peer)
                return;

        vdev = peer->vdev;
        peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
                                         vdev->hl_tdls_ap_mac_addr.raw,
                                         peer_id);

        qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
        if (!vdev->last_real_peer && peer &&
            (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
                vdev->last_real_peer = peer;
        qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
}
#endif

/**
 * ol_tx_mark_first_wakeup_packet() - set flag to indicate that the fw supports
 *                                    marking the first packet after wow wakeup
 * @value: 1 for enabled / 0 for disabled
 *
 * Return: None
 */
static void ol_tx_mark_first_wakeup_packet(uint8_t value)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pdev) {
                ol_txrx_err("%s: pdev is NULL\n", __func__);
                return;
        }

        htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
}

u_int16_t
ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
{
        u_int16_t desc_pool_size;
        u_int16_t steady_state_tx_lifetime_ms;
        u_int16_t safety_factor;

        /*
         * Steady-state tx latency:
         *     roughly 1-2 ms flight time
         *   + roughly 1-2 ms prep time,
         *   + roughly 1-2 ms target->host notification time.
         * = roughly 6 ms total
         * Thus, steady state number of frames =
         * steady state max throughput / frame size * tx latency, e.g.
         * 1 Gbps / 1500 bytes * 6 ms = 500
         *
         */
        steady_state_tx_lifetime_ms = 6;

        safety_factor = 8;

        desc_pool_size =
                ol_cfg_max_thruput_mbps(ctrl_pdev) *
                1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
                (8 * OL_TX_AVG_FRM_BYTES) *
                steady_state_tx_lifetime_ms *
                safety_factor;

        /* minimum */
        if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
                desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;

        /* maximum */
        if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
                desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;

        return desc_pool_size;
}

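/*
 * Worked example of the sizing formula above (illustrative only; the real
 * values of ol_cfg_max_thruput_mbps() and OL_TX_AVG_FRM_BYTES are platform
 * configuration dependent).  Assuming 800 Mbps max throughput and an average
 * frame size of 1500 bytes:
 *
 *     800 * 1000 / (8 * 1500)             = 66   (integer division)
 *     66 * 6 (lifetime ms) * 8 (safety)   = 3168 descriptors
 *
 * The result is then clamped to the
 * [OL_TX_DESC_POOL_SIZE_MIN_HL, OL_TX_DESC_POOL_SIZE_MAX_HL] range.
 */
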
/*=== function definitions ===*/

/**
 * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate whether mgmt
 *                                        over wmi is enabled or not
 * @value: 1 for enabled / 0 for disabled
 *
 * Return: None
 */
void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pdev) {
                qdf_print("%s: pdev is NULL\n", __func__);
                return;
        }
        pdev->is_mgmt_over_wmi_enabled = value;
}

/**
 * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
 *
 * Return: is_mgmt_over_wmi_enabled
 */
uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pdev) {
                qdf_print("%s: pdev is NULL\n", __func__);
                return 0;
        }
        return pdev->is_mgmt_over_wmi_enabled;
}

#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
static void *
ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
                                   struct cdp_vdev *pvdev,
                                   uint8_t *peer_addr, uint8_t *peer_id)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
        struct ol_txrx_peer_t *peer;

        peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
        if (!peer)
                return NULL;
        *peer_id = peer->local_id;
        ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
        return peer;
}

static QDF_STATUS ol_txrx_get_vdevid(void *ppeer, uint8_t *vdev_id)
{
        struct ol_txrx_peer_t *peer = ppeer;

        if (!peer) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "peer argument is null!!");
                return QDF_STATUS_E_FAILURE;
        }

        *vdev_id = peer->vdev->vdev_id;
        return QDF_STATUS_SUCCESS;
}

static struct cdp_vdev *ol_txrx_get_vdev_by_sta_id(struct cdp_pdev *ppdev,
                                                   uint8_t sta_id)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
        struct ol_txrx_peer_t *peer = NULL;
        ol_txrx_vdev_handle vdev;

        if (!pdev) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
                          "PDEV not found for sta_id [%d]", sta_id);
                return NULL;
        }

        peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev, sta_id,
                                                PEER_DEBUG_ID_OL_INTERNAL);
        if (!peer) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
                          "PEER [%d] not found", sta_id);
                return NULL;
        }

        vdev = peer->vdev;
        ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);

        return (struct cdp_vdev *)vdev;
}

/**
 * ol_txrx_find_peer_by_addr() - find peer via peer mac addr and peer_id
 * @ppdev: pointer of type cdp_pdev
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds a peer with the given mac address and returns its
 * peer_id.
 * Note that this function does not increment the peer->ref_cnt.
 * This means that the peer may be deleted in some other parallel context
 * after it has been found.
 *
 * Return: peer handle if peer is found, NULL if peer is not found.
 */
void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
                                uint8_t *peer_addr,
                                uint8_t *peer_id)
{
        struct ol_txrx_peer_t *peer;
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

        peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
                                                   PEER_DEBUG_ID_OL_INTERNAL);
        if (!peer)
                return NULL;
        *peer_id = peer->local_id;
        ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
        return peer;
}

/**
 * ol_txrx_peer_get_ref_by_addr() - get peer ref via peer mac addr and peer_id
 * @pdev: pointer of type ol_txrx_pdev_handle
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds the peer with the given mac address and returns its
 * peer_id.
 * Note that this function increments the peer->ref_cnt.
 * This makes sure that the peer will remain valid. This also means the caller
 * needs to call the corresponding API - ol_txrx_peer_release_ref - to release
 * the peer reference.
 * Sample usage:
 *    {
 *      // the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr, peer_id, dbg_id);
 *
 *      // once peer usage is done
 *
 *      // the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
                                                 u8 *peer_addr,
                                                 u8 *peer_id,
                                                 enum peer_debug_id_type dbg_id)
{
        struct ol_txrx_peer_t *peer;

        peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
                                                   dbg_id);
        if (!peer)
                return NULL;
        *peer_id = peer->local_id;
        return peer;
}

static uint16_t ol_txrx_local_peer_id(void *ppeer)
{
        ol_txrx_peer_handle peer = ppeer;

        return peer->local_id;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @return handle to the txrx peer object
 */
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *ppdev,
                              uint8_t local_peer_id)
{
        struct ol_txrx_peer_t *peer;
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

        if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
            (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
                return NULL;
        }

        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        peer = pdev->local_peer_ids.map[local_peer_id];
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
        return peer;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @dbg_id - debug_id to track caller
 * @return handle to the txrx peer object
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * Note that this function increments the peer->ref_cnt.
 * This makes sure that the peer will remain valid. This also means the caller
 * needs to call the corresponding API - ol_txrx_peer_release_ref - to release
 * the peer reference.
 * Sample usage:
 *    {
 *      // the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_local_id(pdev, local_peer_id, dbg_id);
 *
 *      // once peer usage is done
 *
 *      // the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
                                 uint8_t local_peer_id,
                                 enum peer_debug_id_type dbg_id)
{
        struct ol_txrx_peer_t *peer = NULL;
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

        if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
            (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
                return NULL;
        }

        qdf_spin_lock_bh(&pdev->peer_ref_mutex);
        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        peer = pdev->local_peer_ids.map[local_peer_id];
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
        if (peer && peer->valid)
                ol_txrx_peer_get_ref(peer, dbg_id);
        else
                peer = NULL;
        qdf_spin_unlock_bh(&pdev->peer_ref_mutex);

        return peer;
}

static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
{
        int i;

        /* point the freelist to the first ID */
        pdev->local_peer_ids.freelist = 0;

        /* link each ID to the next one */
        for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
                pdev->local_peer_ids.pool[i] = i + 1;
                pdev->local_peer_ids.map[i] = NULL;
        }

        /* link the last ID to itself, to mark the end of the list */
        i = OL_TXRX_NUM_LOCAL_PEER_IDS;
        pdev->local_peer_ids.pool[i] = i;

        qdf_spinlock_create(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
                            struct ol_txrx_peer_t *peer)
{
        int i;

        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        i = pdev->local_peer_ids.freelist;
        if (pdev->local_peer_ids.pool[i] == i) {
                /* the list is empty, except for the list-end marker */
                peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
        } else {
                /* take the head ID and advance the freelist */
                peer->local_id = i;
                pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
                pdev->local_peer_ids.map[i] = peer;
        }
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
                           struct ol_txrx_peer_t *peer)
{
        int i = peer->local_id;

        if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
            (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
                return;
        }
        /* put this ID on the head of the freelist */
        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
        pdev->local_peer_ids.freelist = i;
        pdev->local_peer_ids.map[i] = NULL;
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
{
        qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
}

#else
#define ol_txrx_local_peer_id_pool_init(pdev)          /* no-op */
#define ol_txrx_local_peer_id_alloc(pdev, peer)        /* no-op */
#define ol_txrx_local_peer_id_free(pdev, peer)         /* no-op */
#define ol_txrx_local_peer_id_cleanup(pdev)            /* no-op */
#endif

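/*
 * Illustrative sketch of the local peer ID freelist above (not driver code;
 * the value of OL_TXRX_NUM_LOCAL_PEER_IDS is assumed to be 4 purely for the
 * example).  After ol_txrx_local_peer_id_pool_init():
 *
 *     pool = {1, 2, 3, 4, 4}, freelist = 0
 *
 * so IDs are handed out in order 0, 1, 2, 3.  Allocating ID 0 advances
 * freelist to pool[0] == 1; freeing ID 0 pushes it back on the head
 * (pool[0] = old freelist, freelist = 0).  When freelist reaches the
 * list-end marker (pool[i] == i) the pool is exhausted and
 * OL_TXRX_INVALID_LOCAL_PEER_ID is returned.
 */
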
#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

/**
 * ol_txrx_update_group_credit() - update group credit for tx queue
 * @group: the tx queue group whose credit needs to be updated
 * @credit: credits
 * @absolute: TXQ group absolute
 *
 * Return: None
 */
void ol_txrx_update_group_credit(
                struct ol_tx_queue_group_t *group,
                int32_t credit,
                u_int8_t absolute)
{
        if (absolute)
                qdf_atomic_set(&group->credit, credit);
        else
                qdf_atomic_add(credit, &group->credit);
}

/**
 * ol_txrx_update_tx_queue_groups() - update vdev tx queue group if
 *                                    vdev id mask and ac mask is not matching
 * @pdev: the data physical device
 * @group_id: TXQ group id
 * @credit: TXQ group credit count
 * @absolute: TXQ group absolute
 * @vdev_id_mask: TXQ vdev group id mask
 * @ac_mask: TXQ access category mask
 *
 * Return: None
 */
void ol_txrx_update_tx_queue_groups(
                ol_txrx_pdev_handle pdev,
                u_int8_t group_id,
                int32_t credit,
                u_int8_t absolute,
                u_int32_t vdev_id_mask,
                u_int32_t ac_mask
                )
{
        struct ol_tx_queue_group_t *group;
        u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
        u_int32_t membership;
        struct ol_txrx_vdev_t *vdev;

        if (group_id >= OL_TX_MAX_TXQ_GROUPS) {
                ol_txrx_warn("%s: invalid group_id=%u, ignore update.\n",
                             __func__,
                             group_id);
                return;
        }

        group = &pdev->txq_grps[group_id];

        membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);

        qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
        /*
         * if the membership (vdev id mask and ac mask)
         * matches then there is no need to update tx queue groups.
         */
        if (group->membership == membership)
                /* Update Credit Only */
                goto credit_update;

        /*
         * membership (vdev id mask and ac mask) is not matching
         * TODO: ignoring ac mask for now
         */
        group_vdev_id_mask =
                OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);

        TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                group_vdev_bit_mask =
                        OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
                                group_vdev_id_mask, vdev->vdev_id);
                vdev_bit_mask =
                        OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
                                vdev_id_mask, vdev->vdev_id);

                if (group_vdev_bit_mask != vdev_bit_mask) {
                        /* Change in vdev tx queue group */
                        if (!vdev_bit_mask) {
                                /* Set Group Pointer (vdev and peer) to NULL */
                                ol_tx_set_vdev_group_ptr(
                                        pdev, vdev->vdev_id, NULL);
                        } else {
                                /* Set Group Pointer (vdev and peer) */
                                ol_tx_set_vdev_group_ptr(
                                        pdev, vdev->vdev_id, group);
                        }
                }
        }
        /* Update membership */
        group->membership = membership;
credit_update:
        /* Update Credit */
        ol_txrx_update_group_credit(group, credit, absolute);
        qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
}
#endif

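/*
 * Illustrative usage sketch (not part of the driver; the bit layout of the
 * vdev id / ac masks is assumed here, not taken from the membership macros):
 *
 *     ol_txrx_update_tx_queue_groups(pdev, 0, 100, 1, 0x3, 0xf);
 *
 * would make TXQ group 0 cover vdev IDs 0 and 1 (bits 0 and 1 of
 * vdev_id_mask) for all four access categories, and set the group's credit
 * to an absolute value of 100.
 */
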
#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)

/**
 * ol_txrx_rsrc_threshold_lo() - set threshold low - when to start tx desc
 *                               margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold low
 */
static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
        int threshold_low;

        /*
         * 5% margin of unallocated desc is too much for per
         * vdev mechanism.
         * Define the value separately.
         */
        threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;

        return threshold_low;
}

/**
 * ol_txrx_rsrc_threshold_hi() - set threshold high - where to stop
 *                               during tx desc margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold high
 */
static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
        int threshold_high;
        /* when freeing up descriptors,
         * keep going until there's a 7.5% margin
         */
        threshold_high = ((15 * desc_pool_size) / 100) / 2;

        return threshold_high;
}
#else

static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
        int threshold_low;
        /* always maintain a 5% margin of unallocated descriptors */
        threshold_low = (5 * desc_pool_size) / 100;

        return threshold_low;
}

static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
        int threshold_high;
        /* when freeing up descriptors, keep going until
         * there's a 15% margin
         */
        threshold_high = (15 * desc_pool_size) / 100;

        return threshold_high;
}
#endif

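/*
 * Worked example for the default (non-per-vdev) thresholds above, with a
 * hypothetical desc_pool_size of 1024:
 *
 *     threshold_lo = (5 * 1024) / 100   = 51 descriptors
 *     threshold_hi = (15 * 1024) / 100  = 153 descriptors
 *
 * i.e. replenishment starts once fewer than ~5% of descriptors remain
 * unallocated and stops when a ~15% margin has been restored.
 */
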
#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
 * @pdev: the physical device object
 *
 * Return: None
 */
static void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
        qdf_spinlock_create(&pdev->txq_log_spinlock);
        pdev->txq_log.size = OL_TXQ_LOG_SIZE;
        pdev->txq_log.oldest_record_offset = 0;
        pdev->txq_log.offset = 0;
        pdev->txq_log.allow_wrap = 1;
        pdev->txq_log.wrapped = 0;
}

/**
 * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
        qdf_spinlock_destroy(&pdev->txq_log_spinlock);
}

#else

static inline void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
}

static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
}

#endif

#if defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
        qdf_spinlock_create(&pdev->grp_stat_spinlock);
        pdev->grp_stats.last_valid_index = -1;
        pdev->grp_stats.wrap_around = 0;
}

/**
 * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
        qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
}
#else

static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
}

static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
}
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
 * @pvdev: the virtual device object
 * @flag: flag
 *
 * Return: None
 */
void
ol_txrx_hl_tdls_flag_reset(struct cdp_vdev *pvdev, bool flag)
{
        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

        vdev->hlTdlsFlag = flag;
}
#endif

#if defined(CONFIG_HL_SUPPORT)

/**
 * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
        u_int8_t i;

        for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
                TAILQ_INIT(&vdev->txqs[i].head);
                vdev->txqs[i].paused_count.total = 0;
                vdev->txqs[i].frms = 0;
                vdev->txqs[i].bytes = 0;
                vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
                vdev->txqs[i].flag = ol_tx_queue_empty;
                /* aggregation is not applicable for vdev tx queues */
                vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
                ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
                ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
        }
}

/**
 * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
        struct ol_txrx_pdev_t *pdev = vdev->pdev;
        struct ol_tx_frms_queue_t *txq;
        int i;

        for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
                txq = &vdev->txqs[i];
                ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
        }
}

/**
 * ol_txrx_peer_txqs_init() - initialise peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
static void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
                       struct ol_txrx_peer_t *peer)
{
        uint8_t i;
        struct ol_txrx_vdev_t *vdev = peer->vdev;

        qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
        for (i = 0; i < OL_TX_NUM_TIDS; i++) {
                TAILQ_INIT(&peer->txqs[i].head);
                peer->txqs[i].paused_count.total = 0;
                peer->txqs[i].frms = 0;
                peer->txqs[i].bytes = 0;
                peer->txqs[i].ext_tid = i;
                peer->txqs[i].flag = ol_tx_queue_empty;
                peer->txqs[i].aggr_state = ol_tx_aggr_untried;
                ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
                ol_txrx_set_txq_peer(&peer->txqs[i], peer);
        }
        qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);

        /* aggregation is not applicable for mgmt and non-QoS tx queues */
        for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
                peer->txqs[i].aggr_state = ol_tx_aggr_disabled;

        ol_txrx_peer_pause(peer);
}

/**
 * ol_txrx_peer_tx_queue_free() - free peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
static void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
                           struct ol_txrx_peer_t *peer)
{
        struct ol_tx_frms_queue_t *txq;
        uint8_t i;

        for (i = 0; i < OL_TX_NUM_TIDS; i++) {
                txq = &peer->txqs[i];
                ol_tx_queue_free(pdev, txq, i, true);
        }
}
#else

static inline void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
}

static inline void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
}

static inline void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
                       struct ol_txrx_peer_t *peer)
{
}

static inline void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
                           struct ol_txrx_peer_t *peer)
{
}
#endif

#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
{
        qdf_spinlock_create(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
{
        qdf_spinlock_destroy(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
        int msdu_idx;
        int seg_idx;

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                  "TSO Statistics:");
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                  "TSO pkts %lld, bytes %lld\n",
                  pdev->stats.pub.tx.tso.tso_pkts.pkts,
                  pdev->stats.pub.tx.tso.tso_pkts.bytes);

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                  "TSO Histogram for numbers of segments:\n"
                  "Single segment %d\n"
                  "  2-5 segments %d\n"
                  " 6-10 segments %d\n"
                  "11-15 segments %d\n"
                  "16-20 segments %d\n"
                  "  20+ segments %d\n",
                  pdev->stats.pub.tx.tso.tso_hist.pkts_1,
                  pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
                  pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
                  pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
                  pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
                  pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                  "TSO History Buffer: Total size %d, current_index %d",
                  NUM_MAX_TSO_MSDUS,
                  TXRX_STATS_TSO_MSDU_IDX(pdev));

        for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
                if (TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx) == 0)
                        continue;
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                          "jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
                          msdu_idx,
                          TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
                          TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
                          TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
                          TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));

                for (seg_idx = 0;
                     ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx)) &&
                      (seg_idx < NUM_MAX_TSO_SEGS));
                     seg_idx++) {
                        struct qdf_tso_seg_t tso_seg =
                                TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);

                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                                  "seg idx: %d", seg_idx);
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                                  "tso_enable: %d",
                                  tso_seg.tso_flags.tso_enable);
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                                  "fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
                                  tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
                                  tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
                                  tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
                                  tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
                                  tso_seg.tso_flags.ns);
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
                                  "tcp_seq_num: 0x%x ip_id: %d",
                                  tso_seg.tso_flags.tcp_seq_num,
                                  tso_seg.tso_flags.ip_id);
                }
        }
}

static void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
{
        qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_pkts,
                     sizeof(struct ol_txrx_stats_elem));
#if defined(FEATURE_TSO)
        qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_info,
                     sizeof(struct ol_txrx_stats_tso_info));
        qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_hist,
                     sizeof(struct ol_txrx_tso_histogram));
#endif
}

#else

static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "TSO is not supported\n");
}

static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
{
        /*
         * Keeping the body empty and not adding an error print, as a print
         * would show up every time during driver load if TSO is not enabled.
         */
}

static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
{
        /*
         * Keeping the body empty and not adding an error print, as a print
         * would show up every time during driver unload if TSO is not enabled.
         */
}

static void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
{
        /*
         * Keeping the body empty and not adding an error print, as a print
         * would show up every time during driver unload if TSO is not enabled.
         */
}
#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */

#if defined(CONFIG_DP_TRACE) && defined(WLAN_DEBUGFS)
/**
 * ol_txrx_read_dpt_buff_debugfs() - read dp trace buffer
 * @file: file to read
 * @arg: pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_read_dpt_buff_debugfs(qdf_debugfs_file_t file,
                                                void *arg)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)arg;
        uint32_t i = 0;
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID)
                return QDF_STATUS_E_INVAL;
        else if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE) {
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
                return QDF_STATUS_SUCCESS;
        }

        i = qdf_dpt_get_curr_pos_debugfs(file, pdev->state);
        status = qdf_dpt_dump_stats_debugfs(file, i);
        if (status == QDF_STATUS_E_FAILURE)
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS;
        else if (status == QDF_STATUS_SUCCESS)
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE;

        return status;
}

/**
 * ol_txrx_write_dpt_buff_debugfs() - set dp trace parameters
 * @priv: pdev object
 * @buf: buff to get value for dpt parameters
 * @len: buf length
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_write_dpt_buff_debugfs(void *priv,
                                                 const char *buf,
                                                 qdf_size_t len)
{
        return QDF_STATUS_SUCCESS;
}

static int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
        pdev->dpt_debugfs_fops.show = ol_txrx_read_dpt_buff_debugfs;
        pdev->dpt_debugfs_fops.write = ol_txrx_write_dpt_buff_debugfs;
        pdev->dpt_debugfs_fops.priv = pdev;

        pdev->dpt_stats_log_dir = qdf_debugfs_create_dir("dpt_stats", NULL);

        if (!pdev->dpt_stats_log_dir) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: error while creating debugfs dir for %s",
                          __func__, "dpt_stats");
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
                return -EBUSY;
        }

        if (!qdf_debugfs_create_file("dump_set_dpt_logs", DPT_DEBUGFS_PERMS,
                                     pdev->dpt_stats_log_dir,
                                     &pdev->dpt_debugfs_fops)) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: debug Entry creation failed!",
                          __func__);
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
                return -EBUSY;
        }

        pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
        return 0;
}

static void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
        qdf_debugfs_remove_dir_recursive(pdev->dpt_stats_log_dir);
}
#else
static inline int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
        return 0;
}

static inline void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
}
#endif

/**
 * ol_txrx_pdev_attach() - allocate txrx pdev
 * @ctrl_pdev: cfg pdev
 * @htc_pdev: HTC pdev
 * @osdev: os dev
 *
 * Return: txrx pdev handle
 *         NULL for failure
 */
static struct cdp_pdev *
ol_txrx_pdev_attach(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
                    HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
{
        struct ol_txrx_pdev_t *pdev;
        int i, tid;

        pdev = qdf_mem_malloc(sizeof(*pdev));
        if (!pdev)
                goto fail0;

        /* init LL/HL cfg here */
        pdev->cfg.is_high_latency = ol_cfg_is_high_latency(ctrl_pdev);
        pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(ctrl_pdev);

        /* store provided params */
        pdev->ctrl_pdev = ctrl_pdev;
        pdev->osdev = osdev;

        for (i = 0; i < htt_num_sec_types; i++)
                pdev->sec_types[i] = (enum ol_sec_type)i;

        TXRX_STATS_INIT(pdev);
        ol_txrx_tso_stats_init(pdev);

        TAILQ_INIT(&pdev->vdev_list);
        TAILQ_INIT(&pdev->roam_stale_peer_list);

        TAILQ_INIT(&pdev->req_list);
        pdev->req_list_depth = 0;
        qdf_spinlock_create(&pdev->req_list_spinlock);

        /* do initial set up of the peer ID -> peer object lookup map */
        if (ol_txrx_peer_find_attach(pdev))
                goto fail1;

        /* initialize the counter of the target's tx buffer availability */
        qdf_atomic_init(&pdev->target_tx_credit);
        qdf_atomic_init(&pdev->orig_target_tx_credit);

        if (ol_cfg_is_high_latency(ctrl_pdev)) {
                qdf_spinlock_create(&pdev->tx_queue_spinlock);
                pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
                if (pdev->tx_sched.scheduler == NULL)
                        goto fail2;
        }
        ol_txrx_pdev_txq_log_init(pdev);
        ol_txrx_pdev_grp_stats_init(pdev);

        pdev->htt_pdev =
                htt_pdev_alloc(pdev, ctrl_pdev, htc_pdev, osdev);
        if (!pdev->htt_pdev)
                goto fail3;

        htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
                                          ol_rx_pkt_dump_call);

        /*
         * Init the tid --> category table.
         * Regular tids (0-15) map to their AC.
         * Extension tids get their own categories.
         */
        for (tid = 0; tid < OL_TX_NUM_QOS_TIDS; tid++) {
                int ac = TXRX_TID_TO_WMM_AC(tid);

                pdev->tid_to_ac[tid] = ac;
        }
        pdev->tid_to_ac[OL_TX_NON_QOS_TID] =
                OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;
        pdev->tid_to_ac[OL_TX_MGMT_TID] =
                OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
        pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST] =
                OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
        pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
                OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;

        ol_txrx_debugfs_init(pdev);

        return (struct cdp_pdev *)pdev;

fail3:
        ol_txrx_peer_find_detach(pdev);

fail2:
        if (ol_cfg_is_high_latency(ctrl_pdev))
                qdf_spinlock_destroy(&pdev->tx_queue_spinlock);

fail1:
        ol_txrx_tso_stats_deinit(pdev);
        qdf_mem_free(pdev);

fail0:
        return NULL;
}

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
/**
 * htt_pkt_log_init() - API to initialize packet log
 * @handle: pdev handle
 * @scn: HIF context
 *
 * Return: void
 */
void htt_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
        struct ol_txrx_pdev_t *handle = (struct ol_txrx_pdev_t *)ppdev;

        if (handle->pkt_log_init)
                return;

        if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
            !QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
                pktlog_sethandle(&handle->pl_dev, scn);
                pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
                if (pktlogmod_init(scn))
                        qdf_print("%s: pktlogmod_init failed", __func__);
                else
                        handle->pkt_log_init = true;
        }
}

/**
 * htt_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 *
 * Return: void
 */
static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
{
        if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
            !QDF_IS_EPPING_ENABLED(cds_get_conparam()) &&
            handle->pkt_log_init) {
                pktlogmod_exit(handle);
                handle->pkt_log_init = false;
        }
}

#else
void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *ol_sc) { }
static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
#endif

Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001367/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08001368 * ol_txrx_pdev_post_attach() - attach txrx pdev
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001369 * @pdev: txrx pdev
1370 *
1371 * Return: 0 for success
1372 */
1373int
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001374ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001375{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001376 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001377 uint16_t i;
1378 uint16_t fail_idx = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001379 int ret = 0;
1380 uint16_t desc_pool_size;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301381 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001382
Leo Chang376398b2015-10-23 14:19:02 -07001383 uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
1384 union ol_tx_desc_list_elem_t *c_element;
1385 unsigned int sig_bit;
1386 uint16_t desc_per_page;
1387
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001388 if (!osc) {
1389 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001390 goto ol_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001391 }
1392
1393 /*
1394 * For LL, limit the number of host's tx descriptors to match
1395 * the number of target FW tx descriptors.
1396 * This simplifies the FW, by ensuring the host will never
1397 * download more tx descriptors than the target has space for.
1398 * The FW will drop/free low-priority tx descriptors when it
1399 * starts to run low, so that in theory the host should never
1400 * run out of tx descriptors.
1401 */
1402
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001403 /*
 1404 * LL - initialize the target credit ourselves.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301405 * HL - wait for the HTT target credit initialization
1406 * during htt_attach.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001407 */
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301408 if (pdev->cfg.is_high_latency) {
1409 desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001410
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301411 qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
1412 qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001413
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301414 pdev->tx_queue.rsrc_threshold_lo =
1415 ol_txrx_rsrc_threshold_lo(desc_pool_size);
1416 pdev->tx_queue.rsrc_threshold_hi =
1417 ol_txrx_rsrc_threshold_hi(desc_pool_size);
1418
1419 for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++)
1420 qdf_atomic_init(&pdev->txq_grps[i].credit);
1421
1422 ol_tx_target_credit_init(pdev, desc_pool_size);
1423 } else {
1424 qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
1425 &pdev->target_tx_credit);
1426 desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
1427 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001428
Nirav Shah76291962016-04-25 10:50:37 +05301429 ol_tx_desc_dup_detect_init(pdev, desc_pool_size);
1430
Nirav Shah5ff1fd02018-03-11 14:55:53 +05301431 ol_tx_setup_fastpath_ce_handles(osc, pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001432
1433 ret = htt_attach(pdev->htt_pdev, desc_pool_size);
1434 if (ret)
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301435 goto htt_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001436
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001437 /* Attach micro controller data path offload resource */
Yun Parkf01f6e22017-01-18 17:27:02 -08001438 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
1439 ret = htt_ipa_uc_attach(pdev->htt_pdev);
1440 if (ret)
Leo Chang376398b2015-10-23 14:19:02 -07001441 goto uc_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001442 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001443
Leo Chang376398b2015-10-23 14:19:02 -07001444 /* Calculate single element reserved size power of 2 */
Anurag Chouhanc5548422016-02-24 18:33:27 +05301445 pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301446 qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
Leo Chang376398b2015-10-23 14:19:02 -07001447 pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
1448 if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
1449 (NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301450 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001451 "Page alloc fail");
Yun Parkf01f6e22017-01-18 17:27:02 -08001452 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001453 goto page_alloc_fail;
1454 }
1455 desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
1456 pdev->tx_desc.offset_filter = desc_per_page - 1;
1457 /* Calculate page divider to find page number */
1458 sig_bit = 0;
1459 while (desc_per_page) {
1460 sig_bit++;
1461 desc_per_page = desc_per_page >> 1;
1462 }
1463 pdev->tx_desc.page_divider = (sig_bit - 1);
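	/*
	 * Illustration (hypothetical pool geometry): with 32 descriptors
	 * per page, the loop above ends with sig_bit = 6, so
	 * page_divider = 5 and offset_filter = 0x1f. A descriptor id can
	 * then be split into a page index (id >> page_divider) and an
	 * offset within that page (id & offset_filter) with no division.
	 */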
Srinivas Girigowdab8ecec22017-03-09 15:02:59 -08001464 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Leo Chang376398b2015-10-23 14:19:02 -07001465 "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
1466 pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
1467 desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
1468 pdev->tx_desc.desc_pages.num_element_per_page);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001469
1470 /*
1471 * Each SW tx desc (used only within the tx datapath SW) has a
1472 * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
1473 * Go ahead and allocate the HTT tx desc and link it with the SW tx
1474 * desc now, to avoid doing it during time-critical transmit.
1475 */
1476 pdev->tx_desc.pool_size = desc_pool_size;
Leo Chang376398b2015-10-23 14:19:02 -07001477 pdev->tx_desc.freelist =
1478 (union ol_tx_desc_list_elem_t *)
1479 (*pdev->tx_desc.desc_pages.cacheable_pages);
1480 c_element = pdev->tx_desc.freelist;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001481 for (i = 0; i < desc_pool_size; i++) {
1482 void *htt_tx_desc;
Leo Chang376398b2015-10-23 14:19:02 -07001483 void *htt_frag_desc = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301484 qdf_dma_addr_t frag_paddr = 0;
1485 qdf_dma_addr_t paddr;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001486
Leo Chang376398b2015-10-23 14:19:02 -07001487 if (i == (desc_pool_size - 1))
1488 c_element->next = NULL;
1489 else
1490 c_element->next = (union ol_tx_desc_list_elem_t *)
1491 ol_tx_desc_find(pdev, i + 1);
1492
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001493 htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001494 if (!htt_tx_desc) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301495 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001496 "%s: failed to alloc HTT tx desc (%d of %d)",
1497 __func__, i, desc_pool_size);
Leo Chang376398b2015-10-23 14:19:02 -07001498 fail_idx = i;
Yun Parkf01f6e22017-01-18 17:27:02 -08001499 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001500 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001501 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001502
Leo Chang376398b2015-10-23 14:19:02 -07001503 c_element->tx_desc.htt_tx_desc = htt_tx_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001504 c_element->tx_desc.htt_tx_desc_paddr = paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001505 ret = htt_tx_frag_alloc(pdev->htt_pdev,
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001506 i, &frag_paddr, &htt_frag_desc);
Leo Chang376398b2015-10-23 14:19:02 -07001507 if (ret) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301508 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001509 "%s: failed to alloc HTT frag dsc (%d/%d)",
1510 __func__, i, desc_pool_size);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001511 /* Is there a leak here, is this handling correct? */
Leo Chang376398b2015-10-23 14:19:02 -07001512 fail_idx = i;
1513 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001514 }
Leo Chang376398b2015-10-23 14:19:02 -07001515 if (!ret && htt_frag_desc) {
Yun Parkeaea8632017-04-09 09:53:45 -07001516 /*
1517 * Initialize the first 6 words (TSO flags)
1518 * of the frag descriptor
1519 */
Leo Chang376398b2015-10-23 14:19:02 -07001520 memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
1521 c_element->tx_desc.htt_frag_desc = htt_frag_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001522 c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001523 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001524#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
Leo Chang376398b2015-10-23 14:19:02 -07001525 c_element->tx_desc.pkt_type = 0xff;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001526#ifdef QCA_COMPUTE_TX_DELAY
Leo Chang376398b2015-10-23 14:19:02 -07001527 c_element->tx_desc.entry_timestamp_ticks =
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001528 0xffffffff;
1529#endif
1530#endif
Leo Chang376398b2015-10-23 14:19:02 -07001531 c_element->tx_desc.id = i;
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301532 qdf_atomic_init(&c_element->tx_desc.ref_cnt);
Leo Chang376398b2015-10-23 14:19:02 -07001533 c_element = c_element->next;
1534 fail_idx = i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001535 }
1536
1537 /* link SW tx descs into a freelist */
1538 pdev->tx_desc.num_free = desc_pool_size;
Poddar, Siddarth14521792017-03-14 21:19:42 +05301539 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001540 "%s first tx_desc:0x%pK Last tx desc:0x%pK\n", __func__,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001541 (uint32_t *) pdev->tx_desc.freelist,
1542 (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001543
1544 /* check what format of frames are expected to be delivered by the OS */
1545 pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
1546 if (pdev->frame_format == wlan_frm_fmt_native_wifi)
1547 pdev->htt_pkt_type = htt_pkt_type_native_wifi;
1548 else if (pdev->frame_format == wlan_frm_fmt_802_3) {
1549 if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
1550 pdev->htt_pkt_type = htt_pkt_type_eth2;
1551 else
1552 pdev->htt_pkt_type = htt_pkt_type_ethernet;
1553 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301554 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001555 "%s Invalid standard frame type: %d",
1556 __func__, pdev->frame_format);
Yun Parkf01f6e22017-01-18 17:27:02 -08001557 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001558 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001559 }
1560
1561 /* setup the global rx defrag waitlist */
1562 TAILQ_INIT(&pdev->rx.defrag.waitlist);
1563
1564 /* configure where defrag timeout and duplicate detection is handled */
1565 pdev->rx.flags.defrag_timeout_check =
1566 pdev->rx.flags.dup_check =
1567 ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);
1568
1569#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
 1570 /* Need to revisit this part. Currently, hardcoded to Riva's caps. */
1571 pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
1572 pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
1573 /*
1574 * The Riva HW de-aggregate doesn't have capability to generate 802.11
1575 * header for non-first subframe of A-MSDU.
1576 */
1577 pdev->sw_subfrm_hdr_recovery_enable = 1;
1578 /*
1579 * The Riva HW doesn't have the capability to set Protected Frame bit
1580 * in the MAC header for encrypted data frame.
1581 */
1582 pdev->sw_pf_proc_enable = 1;
1583
1584 if (pdev->frame_format == wlan_frm_fmt_802_3) {
Yun Parkeaea8632017-04-09 09:53:45 -07001585 /*
1586 * sw llc process is only needed in
1587 * 802.3 to 802.11 transform case
1588 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001589 pdev->sw_tx_llc_proc_enable = 1;
1590 pdev->sw_rx_llc_proc_enable = 1;
1591 } else {
1592 pdev->sw_tx_llc_proc_enable = 0;
1593 pdev->sw_rx_llc_proc_enable = 0;
1594 }
1595
1596 switch (pdev->frame_format) {
1597 case wlan_frm_fmt_raw:
1598 pdev->sw_tx_encap =
1599 pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
1600 ? 0 : 1;
1601 pdev->sw_rx_decap =
1602 pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
1603 ? 0 : 1;
1604 break;
1605 case wlan_frm_fmt_native_wifi:
1606 pdev->sw_tx_encap =
1607 pdev->
1608 target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
1609 ? 0 : 1;
1610 pdev->sw_rx_decap =
1611 pdev->
1612 target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
1613 ? 0 : 1;
1614 break;
1615 case wlan_frm_fmt_802_3:
1616 pdev->sw_tx_encap =
1617 pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
1618 ? 0 : 1;
1619 pdev->sw_rx_decap =
1620 pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
1621 ? 0 : 1;
1622 break;
1623 default:
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301624 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001625 "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
1626 pdev->frame_format,
1627 pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
Yun Parkf01f6e22017-01-18 17:27:02 -08001628 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001629 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001630 }
1631#endif
1632
1633 /*
1634 * Determine what rx processing steps are done within the host.
1635 * Possibilities:
1636 * 1. Nothing - rx->tx forwarding and rx PN entirely within target.
1637 * (This is unlikely; even if the target is doing rx->tx forwarding,
1638 * the host should be doing rx->tx forwarding too, as a back up for
1639 * the target's rx->tx forwarding, in case the target runs short on
1640 * memory, and can't store rx->tx frames that are waiting for
1641 * missing prior rx frames to arrive.)
1642 * 2. Just rx -> tx forwarding.
1643 * This is the typical configuration for HL, and a likely
1644 * configuration for LL STA or small APs (e.g. retail APs).
1645 * 3. Both PN check and rx -> tx forwarding.
1646 * This is the typical configuration for large LL APs.
1647 * Host-side PN check without rx->tx forwarding is not a valid
1648 * configuration, since the PN check needs to be done prior to
1649 * the rx->tx forwarding.
1650 */
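	/*
	 * Summarizing the selection below:
	 *   full reorder offload + rx->tx fwd disabled -> ol_rx_in_order_deliver
	 *   full reorder offload + rx->tx fwd enabled  -> ol_rx_fwd_check
	 *   host PN check + rx->tx fwd disabled        -> ol_rx_pn_check_only
	 *   host PN check + host rx->tx fwd check      -> ol_rx_pn_check
	 *   host PN check without host fwd check       -> invalid configuration
	 *   target PN check + host rx->tx fwd check    -> ol_rx_fwd_check
	 *   target PN check, otherwise                 -> ol_rx_deliver
	 */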
1651 if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
Yun Parkeaea8632017-04-09 09:53:45 -07001652 /*
1653 * PN check, rx-tx forwarding and rx reorder is done by
1654 * the target
1655 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001656 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
1657 pdev->rx_opt_proc = ol_rx_in_order_deliver;
1658 else
1659 pdev->rx_opt_proc = ol_rx_fwd_check;
1660 } else {
1661 if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
1662 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
1663 /*
1664 * PN check done on host,
1665 * rx->tx forwarding not done at all.
1666 */
1667 pdev->rx_opt_proc = ol_rx_pn_check_only;
1668 } else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1669 /*
1670 * Both PN check and rx->tx forwarding done
1671 * on host.
1672 */
1673 pdev->rx_opt_proc = ol_rx_pn_check;
1674 } else {
 1675#define TRACESTR01 "invalid config: if rx PN check is on the host, "\
1676"rx->tx forwarding check needs to also be on the host"
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301677 QDF_TRACE(QDF_MODULE_ID_TXRX,
1678 QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001679 "%s: %s", __func__, TRACESTR01);
1680#undef TRACESTR01
Yun Parkf01f6e22017-01-18 17:27:02 -08001681 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001682 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001683 }
1684 } else {
1685 /* PN check done on target */
1686 if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
1687 ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1688 /*
1689 * rx->tx forwarding done on host (possibly as
1690 * back-up for target-side primary rx->tx
1691 * forwarding)
1692 */
1693 pdev->rx_opt_proc = ol_rx_fwd_check;
1694 } else {
Yun Parkeaea8632017-04-09 09:53:45 -07001695 /*
1696 * rx->tx forwarding either done in target,
1697 * or not done at all
1698 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001699 pdev->rx_opt_proc = ol_rx_deliver;
1700 }
1701 }
1702 }
1703
1704 /* initialize mutexes for tx desc alloc and peer lookup */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301705 qdf_spinlock_create(&pdev->tx_mutex);
1706 qdf_spinlock_create(&pdev->peer_ref_mutex);
1707 qdf_spinlock_create(&pdev->rx.mutex);
1708 qdf_spinlock_create(&pdev->last_real_peer_mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001709 qdf_spinlock_create(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001710 OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
1711
Yun Parkf01f6e22017-01-18 17:27:02 -08001712 if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK) {
1713 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001714 goto reorder_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001715 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001716
Yun Parkf01f6e22017-01-18 17:27:02 -08001717 if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK) {
1718 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001719 goto pn_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001720 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001721
1722#ifdef PERE_IP_HDR_ALIGNMENT_WAR
1723 pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
1724#endif
1725
1726 /*
1727 * WDI event attach
1728 */
1729 wdi_event_attach(pdev);
1730
1731 /*
1732 * Initialize rx PN check characteristics for different security types.
1733 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301734 qdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001735
1736 /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
1737 pdev->rx_pn[htt_sec_type_tkip].len =
1738 pdev->rx_pn[htt_sec_type_tkip_nomic].len =
1739 pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
1740 pdev->rx_pn[htt_sec_type_tkip].cmp =
1741 pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
1742 pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
1743
1744 /* WAPI: 128-bit PN */
1745 pdev->rx_pn[htt_sec_type_wapi].len = 128;
1746 pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
1747
1748 OL_RX_REORDER_TIMEOUT_INIT(pdev);
1749
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001750 ol_txrx_dbg("Created pdev %pK\n", pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001751
1752 pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
1753
1754#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
1755#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
1756
1757/* #if 1 -- TODO: clean this up */
1758#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
1759 /* avg = 100% * new + 0% * old */ \
1760 (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
1761/*
Yun Parkeaea8632017-04-09 09:53:45 -07001762 * #else
1763 * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
 1764 * //avg = 25% * new + 75% * old
1765 * (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
1766 * #endif
1767 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001768 pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
1769 pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
1770#endif
1771
1772 ol_txrx_local_peer_id_pool_init(pdev);
1773
1774 pdev->cfg.ll_pause_txq_limit =
1775 ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
1776
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301777 /* TX flow control for peer who is in very bad link status */
1778 ol_tx_badpeer_flow_cl_init(pdev);
1779
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001780#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301781 qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301782 qdf_spinlock_create(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001783
1784 /* initialize compute interval with 5 seconds (ESE default) */
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301785 pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001786 {
1787 uint32_t bin_width_1000ticks;
Yun Parkeaea8632017-04-09 09:53:45 -07001788
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001789 bin_width_1000ticks =
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301790 qdf_system_msecs_to_ticks
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001791 (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
1792 * 1000);
1793 /*
1794 * Compute a factor and shift that together are equal to the
1795 * inverse of the bin_width time, so that rather than dividing
1796 * by the bin width time, approximately the same result can be
1797 * obtained much more efficiently by a multiply + shift.
1798 * multiply_factor >> shift = 1 / bin_width_time, so
1799 * multiply_factor = (1 << shift) / bin_width_time.
1800 *
1801 * Pick the shift semi-arbitrarily.
1802 * If we knew statically what the bin_width would be, we could
1803 * choose a shift that minimizes the error.
1804 * Since the bin_width is determined dynamically, simply use a
1805 * shift that is about half of the uint32_t size. This should
1806 * result in a relatively large multiplier value, which
1807 * minimizes error from rounding the multiplier to an integer.
1808 * The rounding error only becomes significant if the tick units
1809 * are on the order of 1 microsecond. In most systems, it is
1810 * expected that the tick units will be relatively low-res,
1811 * on the order of 1 millisecond. In such systems the rounding
1812 * error is negligible.
1813 * It would be more accurate to dynamically try out different
1814 * shifts and choose the one that results in the smallest
1815 * rounding error, but that extra level of fidelity is
1816 * not needed.
1817 */
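		/*
		 * Illustration (assuming 1 ms ticks and a 10 ms internal
		 * bin width): bin_width_1000ticks = 10 * 1000 = 10000, so
		 * hist_internal_bin_width_mult =
		 *	((1 << 16) * 1000 + 5000) / 10000 = 6554.
		 * A delay of N ticks then falls in bin (N * 6554) >> 16,
		 * i.e. approximately N / 10, with no runtime division.
		 */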
1818 pdev->tx_delay.hist_internal_bin_width_shift = 16;
1819 pdev->tx_delay.hist_internal_bin_width_mult =
1820 ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
1821 1000 + (bin_width_1000ticks >> 1)) /
1822 bin_width_1000ticks;
1823 }
1824#endif /* QCA_COMPUTE_TX_DELAY */
1825
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001826 /* Thermal Mitigation */
1827 ol_tx_throttle_init(pdev);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001828
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001829 ol_tso_seg_list_init(pdev, desc_pool_size);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001830
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301831 ol_tso_num_seg_list_init(pdev, desc_pool_size);
1832
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001833 ol_tx_register_flow_control(pdev);
1834
1835 return 0; /* success */
1836
Leo Chang376398b2015-10-23 14:19:02 -07001837pn_trace_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001838 OL_RX_REORDER_TRACE_DETACH(pdev);
1839
Leo Chang376398b2015-10-23 14:19:02 -07001840reorder_trace_attach_fail:
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301841 qdf_spinlock_destroy(&pdev->tx_mutex);
1842 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1843 qdf_spinlock_destroy(&pdev->rx.mutex);
1844 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301845 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001846 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1847
Leo Chang376398b2015-10-23 14:19:02 -07001848control_init_fail:
1849desc_alloc_fail:
1850 for (i = 0; i < fail_idx; i++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001851 htt_tx_desc_free(pdev->htt_pdev,
Leo Chang376398b2015-10-23 14:19:02 -07001852 (ol_tx_desc_find(pdev, i))->htt_tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001853
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301854 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001855 &pdev->tx_desc.desc_pages, 0, true);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001856
Leo Chang376398b2015-10-23 14:19:02 -07001857page_alloc_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001858 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1859 htt_ipa_uc_detach(pdev->htt_pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001860uc_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001861 htt_detach(pdev->htt_pdev);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301862htt_attach_fail:
1863 ol_tx_desc_dup_detect_deinit(pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001864ol_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001865 return ret; /* fail */
1866}
1867
Dhanashri Atre12a08392016-02-17 13:10:34 -08001868/**
1869 * ol_txrx_pdev_attach_target() - send target configuration
1870 *
 1871 * @ppdev - the physical device being initialized
1872 *
 1873 * The majority of the data SW setup is done by the pdev_attach
1874 * functions, but this function completes the data SW setup by
1875 * sending datapath configuration messages to the target.
1876 *
 1877 * Return: 0 - success, 1 - failure
1878 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001879static A_STATUS ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001880{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001881 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001882
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +05301883 return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0 : 1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001884}
1885
Dhanashri Atre12a08392016-02-17 13:10:34 -08001886/**
Mohit Khanna54f3a382017-03-13 17:56:32 -07001887 * ol_tx_free_descs_inuse - free tx descriptors which are in use
1888 * @pdev - the physical device for which tx descs need to be freed
1889 *
1890 * Cycle through the list of TX descriptors (for a pdev) which are in use,
1891 * for which TX completion has not been received and free them. Should be
1892 * called only when the interrupts are off and all lower layer RX is stopped.
1893 * Otherwise there may be a race condition with TX completions.
1894 *
1895 * Return: None
1896 */
1897static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
1898{
1899 int i;
1900 void *htt_tx_desc;
1901 struct ol_tx_desc_t *tx_desc;
1902 int num_freed_tx_desc = 0;
1903
1904 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1905 tx_desc = ol_tx_desc_find(pdev, i);
1906 /*
1907 * Confirm that each tx descriptor is "empty", i.e. it has
1908 * no tx frame attached.
1909 * In particular, check that there are no frames that have
1910 * been given to the target to transmit, for which the
1911 * target has never provided a response.
1912 */
1913 if (qdf_atomic_read(&tx_desc->ref_cnt)) {
1914 ol_txrx_dbg("Warning: freeing tx frame (no compltn)");
1915 ol_tx_desc_frame_free_nonstd(pdev,
1916 tx_desc, 1);
1917 num_freed_tx_desc++;
1918 }
1919 htt_tx_desc = tx_desc->htt_tx_desc;
1920 htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
1921 }
1922
1923 if (num_freed_tx_desc)
1924 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1925 "freed %d tx frames for which no resp from target",
1926 num_freed_tx_desc);
1927
1928}
1929
1930/**
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301931 * ol_txrx_pdev_pre_detach() - detach the data SW state
Dhanashri Atre12a08392016-02-17 13:10:34 -08001932 * @ppdev - the data physical device object being removed
1933 * @force - delete the pdev (and its vdevs and peers) even if
1934 * there are outstanding references by the target to the vdevs
1935 * and peers within the pdev
1936 *
1937 * This function is used when the WLAN driver is being removed to
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301938 * detach the host data component within the driver.
Dhanashri Atre12a08392016-02-17 13:10:34 -08001939 *
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301940 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08001941 */
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301942static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001943{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001944 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001945
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001946 /* preconditions */
1947 TXRX_ASSERT2(pdev);
1948
1949 /* check that the pdev has no vdevs allocated */
1950 TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
1951
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001952#ifdef QCA_SUPPORT_TX_THROTTLE
1953 /* Thermal Mitigation */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301954 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
1955 qdf_timer_free(&pdev->tx_throttle.phase_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001956#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301957 qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1958 qdf_timer_free(&pdev->tx_throttle.tx_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001959#endif
1960#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001961
1962 if (force) {
1963 /*
1964 * The assertion above confirms that all vdevs within this pdev
1965 * were detached. However, they may not have actually been
1966 * deleted.
1967 * If the vdev had peers which never received a PEER_UNMAP msg
1968 * from the target, then there are still zombie peer objects,
1969 * and the vdev parents of the zombie peers are also zombies,
1970 * hanging around until their final peer gets deleted.
1971 * Go through the peer hash table and delete any peers left.
1972 * As a side effect, this will complete the deletion of any
1973 * vdevs that are waiting for their peers to finish deletion.
1974 */
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001975 ol_txrx_dbg("Force delete for pdev %pK\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001976 pdev);
1977 ol_txrx_peer_find_hash_erase(pdev);
1978 }
1979
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301980 /* to get flow pool status before freeing descs */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07001981 ol_tx_dump_flow_pool_info((void *)pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07001982 ol_tx_free_descs_inuse(pdev);
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301983 ol_tx_deregister_flow_control(pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07001984
1985 /*
 1986 * ol_tso_seg_list_deinit should happen after
 1987 * ol_tx_free_descs_inuse, since ol_tx_free_descs_inuse accesses the
 1988 * tso seg freelist, which is de-initialized by ol_tso_seg_list_deinit
1989 */
1990 ol_tso_seg_list_deinit(pdev);
1991 ol_tso_num_seg_list_deinit(pdev);
1992
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301993 /* Stop the communication between HTT and target at first */
1994 htt_detach_target(pdev->htt_pdev);
1995
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301996 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001997 &pdev->tx_desc.desc_pages, 0, true);
1998 pdev->tx_desc.freelist = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001999
2000 /* Detach micro controller data path offload resource */
2001 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
2002 htt_ipa_uc_detach(pdev->htt_pdev);
2003
2004 htt_detach(pdev->htt_pdev);
Nirav Shah76291962016-04-25 10:50:37 +05302005 ol_tx_desc_dup_detect_deinit(pdev);
2006
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302007 qdf_spinlock_destroy(&pdev->tx_mutex);
2008 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
2009 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
2010 qdf_spinlock_destroy(&pdev->rx.mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07002011 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002012#ifdef QCA_SUPPORT_TX_THROTTLE
2013 /* Thermal Mitigation */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302014 qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002015#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302016
2017 /* TX flow control for peer who is in very bad link status */
2018 ol_tx_badpeer_flow_cl_deinit(pdev);
2019
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002020 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
2021
2022 OL_RX_REORDER_TRACE_DETACH(pdev);
2023 OL_RX_PN_TRACE_DETACH(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302024
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002025 /*
2026 * WDI event detach
2027 */
2028 wdi_event_detach(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302029
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002030 ol_txrx_local_peer_id_cleanup(pdev);
2031
2032#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302033 qdf_spinlock_destroy(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002034#endif
2035}
2036
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302037/**
2038 * ol_txrx_pdev_detach() - delete the data SW state
2039 * @ppdev - the data physical device object being removed
2040 * @force - delete the pdev (and its vdevs and peers) even if
2041 * there are outstanding references by the target to the vdevs
2042 * and peers within the pdev
2043 *
2044 * This function is used when the WLAN driver is being removed to
2045 * remove the host data component within the driver.
2046 * All virtual devices within the physical device need to be deleted
2047 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
2048 *
2049 * Return: None
2050 */
2051static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
2052{
2053 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Rakesh Pillai33942c42018-05-09 11:45:38 +05302054 struct ol_txrx_stats_req_internal *req, *temp_req;
tfyu9fcabd72017-09-26 17:46:48 +08002055 int i = 0;
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302056
 2057 /* check to ensure the txrx pdev structure is not NULL */
2058 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302059 ol_txrx_err(
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302060 "NULL pdev passed to %s\n", __func__);
2061 return;
2062 }
2063
2064 htt_pktlogmod_exit(pdev);
2065
tfyu9fcabd72017-09-26 17:46:48 +08002066 qdf_spin_lock_bh(&pdev->req_list_spinlock);
2067 if (pdev->req_list_depth > 0)
2068 ol_txrx_err(
2069 "Warning: the txrx req list is not empty, depth=%d\n",
2070 pdev->req_list_depth
2071 );
Rakesh Pillai33942c42018-05-09 11:45:38 +05302072 TAILQ_FOREACH_SAFE(req, &pdev->req_list, req_list_elem, temp_req) {
tfyu9fcabd72017-09-26 17:46:48 +08002073 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
2074 pdev->req_list_depth--;
2075 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05302076 "%d: %pK,verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)\n",
tfyu9fcabd72017-09-26 17:46:48 +08002077 i++,
2078 req,
2079 req->base.print.verbose,
2080 req->base.print.concise,
2081 req->base.stats_type_upload_mask,
2082 req->base.stats_type_reset_mask
2083 );
2084 qdf_mem_free(req);
2085 }
2086 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
2087
2088 qdf_spinlock_destroy(&pdev->req_list_spinlock);
2089
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302090 OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
2091
2092 if (pdev->cfg.is_high_latency)
2093 ol_tx_sched_detach(pdev);
2094
2095 htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
2096
2097 htt_pdev_free(pdev->htt_pdev);
2098 ol_txrx_peer_find_detach(pdev);
2099 ol_txrx_tso_stats_deinit(pdev);
2100
2101 ol_txrx_pdev_txq_log_destroy(pdev);
2102 ol_txrx_pdev_grp_stat_destroy(pdev);
Alok Kumarddd457e2018-04-09 13:51:42 +05302103
Rakshith Suresh Patkar44f6a8f2018-04-17 16:17:12 +05302104 ol_txrx_debugfs_exit(pdev);
2105
Alok Kumarddd457e2018-04-09 13:51:42 +05302106 qdf_mem_free(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302107}
2108
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302109#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
2110
2111/**
2112 * ol_txrx_vdev_tx_desc_cnt_init() - initialise tx descriptor count for vdev
2113 * @vdev: the virtual device object
2114 *
2115 * Return: None
2116 */
2117static inline void
2118ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
2119{
2120 qdf_atomic_init(&vdev->tx_desc_count);
2121}
2122#else
2123
2124static inline void
2125ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
2126{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302127}
2128#endif
2129
Dhanashri Atre12a08392016-02-17 13:10:34 -08002130/**
2131 * ol_txrx_vdev_attach - Allocate and initialize the data object
2132 * for a new virtual device.
2133 *
2134 * @data_pdev - the physical device the virtual device belongs to
2135 * @vdev_mac_addr - the MAC address of the virtual device
2136 * @vdev_id - the ID used to identify the virtual device to the target
2137 * @op_mode - whether this virtual device is operating as an AP,
2138 * an IBSS, or a STA
2139 *
2140 * Return: success: handle to new data vdev object, failure: NULL
2141 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002142static struct cdp_vdev *
2143ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002144 uint8_t *vdev_mac_addr,
2145 uint8_t vdev_id, enum wlan_op_mode op_mode)
2146{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002147 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002148 struct ol_txrx_vdev_t *vdev;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002149 QDF_STATUS qdf_status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002150
2151 /* preconditions */
2152 TXRX_ASSERT2(pdev);
2153 TXRX_ASSERT2(vdev_mac_addr);
2154
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302155 vdev = qdf_mem_malloc(sizeof(*vdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002156 if (!vdev)
2157 return NULL; /* failure */
2158
2159 /* store provided params */
2160 vdev->pdev = pdev;
2161 vdev->vdev_id = vdev_id;
2162 vdev->opmode = op_mode;
2163
2164 vdev->delete.pending = 0;
2165 vdev->safemode = 0;
2166 vdev->drop_unenc = 1;
2167 vdev->num_filters = 0;
Himanshu Agarwal5ac2f7b2016-05-06 20:08:10 +05302168 vdev->fwd_tx_packets = 0;
2169 vdev->fwd_rx_packets = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002170
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302171 ol_txrx_vdev_tx_desc_cnt_init(vdev);
2172
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302173 qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002174 OL_TXRX_MAC_ADDR_LEN);
2175
2176 TAILQ_INIT(&vdev->peer_list);
2177 vdev->last_real_peer = NULL;
2178
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002179 ol_txrx_hl_tdls_flag_reset((struct cdp_vdev *)vdev, false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302180
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002181#ifdef QCA_IBSS_SUPPORT
2182 vdev->ibss_peer_num = 0;
2183 vdev->ibss_peer_heart_beat_timer = 0;
2184#endif
2185
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302186 ol_txrx_vdev_txqs_init(vdev);
2187
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302188 qdf_spinlock_create(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002189 vdev->ll_pause.paused_reason = 0;
2190 vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
2191 vdev->ll_pause.txq.depth = 0;
wadesong5e2e8012017-08-21 16:56:03 +08002192 qdf_atomic_init(&vdev->delete.detaching);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302193 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002194 &vdev->ll_pause.timer,
2195 ol_tx_vdev_ll_pause_queue_send, vdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302196 QDF_TIMER_TYPE_SW);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302197 qdf_atomic_init(&vdev->os_q_paused);
2198 qdf_atomic_set(&vdev->os_q_paused, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002199 vdev->tx_fl_lwm = 0;
2200 vdev->tx_fl_hwm = 0;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002201 vdev->rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002202 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Abhishek Singh217d9782017-04-28 23:49:11 +05302203 qdf_mem_zero(&vdev->last_peer_mac_addr,
2204 sizeof(union ol_txrx_align_mac_addr_t));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302205 qdf_spinlock_create(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002206 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08002207 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002208 vdev->osif_fc_ctx = NULL;
2209
Alok Kumar75355aa2018-03-19 17:32:58 +05302210 vdev->txrx_stats.txack_success = 0;
2211 vdev->txrx_stats.txack_failed = 0;
2212
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002213 /* Default MAX Q depth for every VDEV */
2214 vdev->ll_pause.max_q_depth =
2215 ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002216 qdf_status = qdf_event_create(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002217 /* add this vdev into the pdev's list */
2218 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
2219
Poddar, Siddarth14521792017-03-14 21:19:42 +05302220 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002221 "Created vdev %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002222 vdev,
2223 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2224 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2225 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2226
2227 /*
2228 * We've verified that htt_op_mode == wlan_op_mode,
2229 * so no translation is needed.
2230 */
2231 htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
2232
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002233 return (struct cdp_vdev *)vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002234}
2235
Dhanashri Atre12a08392016-02-17 13:10:34 -08002236/**
 2237 * ol_txrx_vdev_register - Link a vdev's data object with the
2238 * matching OS shim vdev object.
2239 *
2240 * @txrx_vdev: the virtual device's data object
2241 * @osif_vdev: the virtual device's OS shim object
2242 * @txrx_ops: (pointers to)functions used for tx and rx data xfer
2243 *
2244 * The data object for a virtual device is created by the
2245 * function ol_txrx_vdev_attach. However, rather than fully
2246 * linking the data vdev object with the vdev objects from the
2247 * other subsystems that the data vdev object interacts with,
2248 * the txrx_vdev_attach function focuses primarily on creating
2249 * the data vdev object. After the creation of both the data
2250 * vdev object and the OS shim vdev object, this
2251 * txrx_osif_vdev_attach function is used to connect the two
2252 * vdev objects, so the data SW can use the OS shim vdev handle
2253 * when passing rx data received by a vdev up to the OS shim.
2254 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002255static void ol_txrx_vdev_register(struct cdp_vdev *pvdev,
2256 void *osif_vdev,
2257 struct ol_txrx_ops *txrx_ops)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002258{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002259 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002260
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002261 if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
2262 qdf_print("%s: vdev/txrx_ops is NULL!\n", __func__);
2263 qdf_assert(0);
2264 return;
2265 }
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002266
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002267 vdev->osif_dev = osif_vdev;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002268 vdev->rx = txrx_ops->rx.rx;
Poddar, Siddarth3906e172018-01-09 11:24:58 +05302269 vdev->stats_rx = txrx_ops->rx.stats_rx;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002270 txrx_ops->tx.tx = ol_tx_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002271}
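
/*
 * Illustrative usage sketch only (not part of the driver): how an OS shim
 * might create and register a vdev. The shim context, rx handler and
 * operating mode below are hypothetical placeholders.
 *
 *	struct ol_txrx_ops ops = { 0 };
 *	struct cdp_vdev *vdev;
 *
 *	vdev = ol_txrx_vdev_attach(ppdev, mac_addr, vdev_id,
 *				   wlan_op_mode_sta);
 *	ops.rx.rx = shim_rx_deliver;
 *	ol_txrx_vdev_register(vdev, shim_ctx, &ops);
 *
 * After registration, ops.tx.tx points at the datapath transmit entry
 * (ol_tx_data), which the shim uses to hand frames down for transmission.
 */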
2272
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002273#ifdef currently_unused
Dhanashri Atre12a08392016-02-17 13:10:34 -08002274/**
2275 * ol_txrx_set_curchan - Setup the current operating channel of
2276 * the device
2277 * @pdev - the data physical device object
2278 * @chan_mhz - the channel frequency (mhz) packets on
2279 *
2280 * Mainly used when populating monitor mode status that requires
2281 * the current operating channel
2282 *
2283 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002284void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz)
2285{
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002286}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002287#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002288
2289void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
2290{
2291 vdev->safemode = val;
2292}
2293
Dhanashri Atre12a08392016-02-17 13:10:34 -08002294/**
2295 * ol_txrx_set_privacy_filters - set the privacy filter
2296 * @vdev - the data virtual device object
2297 * @filter - filters to be set
2298 * @num - the number of filters
2299 *
2300 * Rx related. Set the privacy filters. When rx packets, check
2301 * the ether type, filter type and packet type to decide whether
2302 * discard these packets.
2303 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002304static void
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002305ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
2306 void *filters, uint32_t num)
2307{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302308 qdf_mem_copy(vdev->privacy_filters, filters,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002309 num * sizeof(struct privacy_exemption));
2310 vdev->num_filters = num;
2311}
2312
2313void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
2314{
2315 vdev->drop_unenc = val;
2316}
2317
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07002318#if defined(CONFIG_HL_SUPPORT) || defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
gbian016a42e2017-03-01 18:49:11 +08002319
2320static void
2321ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2322{
2323 struct ol_txrx_pdev_t *pdev = vdev->pdev;
2324 int i;
2325 struct ol_tx_desc_t *tx_desc;
2326
2327 qdf_spin_lock_bh(&pdev->tx_mutex);
2328 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2329 tx_desc = ol_tx_desc_find(pdev, i);
2330 if (tx_desc->vdev == vdev)
2331 tx_desc->vdev = NULL;
2332 }
2333 qdf_spin_unlock_bh(&pdev->tx_mutex);
2334}
2335
2336#else
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07002337#ifdef QCA_LL_TX_FLOW_CONTROL_V2
2338static void ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2339{
2340 struct ol_txrx_pdev_t *pdev = vdev->pdev;
2341 struct ol_tx_flow_pool_t *pool;
2342 int i;
2343 struct ol_tx_desc_t *tx_desc;
gbian016a42e2017-03-01 18:49:11 +08002344
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07002345 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
2346 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2347 tx_desc = ol_tx_desc_find(pdev, i);
2348 if (!qdf_atomic_read(&tx_desc->ref_cnt))
2349 /* not in use */
2350 continue;
2351
2352 pool = tx_desc->pool;
2353 qdf_spin_lock_bh(&pool->flow_pool_lock);
2354 if (tx_desc->vdev == vdev)
2355 tx_desc->vdev = NULL;
2356 qdf_spin_unlock_bh(&pool->flow_pool_lock);
2357 }
2358 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
2359}
2360
2361#else
gbian016a42e2017-03-01 18:49:11 +08002362static void
2363ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2364{
gbian016a42e2017-03-01 18:49:11 +08002365}
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07002366#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
2367#endif /* CONFIG_HL_SUPPORT */
gbian016a42e2017-03-01 18:49:11 +08002368
Dhanashri Atre12a08392016-02-17 13:10:34 -08002369/**
2370 * ol_txrx_vdev_detach - Deallocate the specified data virtual
2371 * device object.
2372 * @data_vdev: data object for the virtual device in question
2373 * @callback: function to call (if non-NULL) once the vdev has
2374 * been wholly deleted
2375 * @callback_context: context to provide in the callback
2376 *
2377 * All peers associated with the virtual device need to be deleted
2378 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
2379 * However, for the peers to be fully deleted, the peer deletion has to
2380 * percolate through the target data FW and back up to the host data SW.
2381 * Thus, even though the host control SW may have issued a peer_detach
2382 * call for each of the vdev's peers, the peer objects may still be
2383 * allocated, pending removal of all references to them by the target FW.
2384 * In this case, though the vdev_detach function call will still return
2385 * immediately, the vdev itself won't actually be deleted, until the
2386 * deletions of all its peers complete.
2387 * The caller can provide a callback function pointer to be notified when
2388 * the vdev deletion actually happens - whether it's directly within the
2389 * vdev_detach call, or if it's deferred until all in-progress peer
2390 * deletions have completed.
2391 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002392static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002393ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002394 ol_txrx_vdev_delete_cb callback, void *context)
2395{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002396 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
wadesong5e2e8012017-08-21 16:56:03 +08002397 struct ol_txrx_pdev_t *pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002398
2399 /* preconditions */
2400 TXRX_ASSERT2(vdev);
wadesong5e2e8012017-08-21 16:56:03 +08002401 pdev = vdev->pdev;
2402
2403 /* prevent anyone from restarting the ll_pause timer again */
2404 qdf_atomic_set(&vdev->delete.detaching, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002405
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302406 ol_txrx_vdev_tx_queue_free(vdev);
2407
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302408 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302409 qdf_timer_stop(&vdev->ll_pause.timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002410 vdev->ll_pause.is_q_timer_on = false;
2411 while (vdev->ll_pause.txq.head) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302412 qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
Yun Parkeaea8632017-04-09 09:53:45 -07002413
Nirav Shahcbc6d722016-03-01 16:24:53 +05302414 qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
Nirav Shahcbc6d722016-03-01 16:24:53 +05302415 qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002416 vdev->ll_pause.txq.head = next;
2417 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302418 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
wadesong5e2e8012017-08-21 16:56:03 +08002419
2420 /* ll_pause timer should be deleted without any locks held, and
2421 * no timer function should be executed after this point because
2422 * qdf_timer_free is deleting the timer synchronously.
2423 */
2424 qdf_timer_free(&vdev->ll_pause.timer);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302425 qdf_spinlock_destroy(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002426
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302427 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002428 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08002429 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002430 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302431 qdf_spin_unlock_bh(&vdev->flow_control_lock);
2432 qdf_spinlock_destroy(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002433
2434 /* remove the vdev from its parent pdev's list */
2435 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
2436
2437 /*
2438 * Use peer_ref_mutex while accessing peer_list, in case
2439 * a peer is in the process of being removed from the list.
2440 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302441 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002442 /* check that the vdev has no peers allocated */
2443 if (!TAILQ_EMPTY(&vdev->peer_list)) {
2444 /* debug print - will be removed later */
Poddar, Siddarth14521792017-03-14 21:19:42 +05302445 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002446 "%s: not deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002447 __func__, vdev,
2448 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2449 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2450 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2451 /* indicate that the vdev needs to be deleted */
2452 vdev->delete.pending = 1;
2453 vdev->delete.callback = callback;
2454 vdev->delete.context = context;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302455 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002456 return;
2457 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302458 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002459 qdf_event_destroy(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002460
Poddar, Siddarth14521792017-03-14 21:19:42 +05302461 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002462 "%s: deleting vdev obj %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002463 __func__, vdev,
2464 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2465 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2466 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2467
2468 htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
2469
2470 /*
Yun Parkeaea8632017-04-09 09:53:45 -07002471 * ol_tx_desc_free might access invalid vdev contents referenced by a
 2472 * tx desc, since this vdev might be detached asynchronously by
 2473 * another thread.
2474 *
2475 * Go through tx desc pool to set corresponding tx desc's vdev to NULL
2476 * when detach this vdev, and add vdev checking in the ol_tx_desc_free
2477 * to avoid crash.
2478 *
2479 */
gbian016a42e2017-03-01 18:49:11 +08002480 ol_txrx_tx_desc_reset_vdev(vdev);
2481
2482 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002483 * Doesn't matter if there are outstanding tx frames -
2484 * they will be freed once the target sends a tx completion
2485 * message for them.
2486 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302487 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002488 if (callback)
2489 callback(context);
2490}
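
/*
 * Illustrative usage sketch only (not part of the driver): the caller's
 * delete callback and context names below are hypothetical.
 *
 *	ol_txrx_vdev_detach(vdev, shim_vdev_deleted, shim_ctx);
 *
 * shim_vdev_deleted(shim_ctx) runs synchronously from within the detach
 * call when the vdev has no peers left, or later, once the target's
 * final peer unmap completes and the vdev is actually freed.
 */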
2491
2492/**
2493 * ol_txrx_flush_rx_frames() - flush cached rx frames
2494 * @peer: peer
2495 * @drop: set flag to drop frames
2496 *
2497 * Return: None
2498 */
2499void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302500 bool drop)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002501{
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002502 struct ol_txrx_cached_bufq_t *bufqi;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002503 struct ol_rx_cached_buf *cache_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302504 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002505 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002506
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302507 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
2508 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002509 return;
2510 }
2511
Dhanashri Atre182b0272016-02-17 15:35:07 -08002512 qdf_assert(peer->vdev);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302513 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002514 bufqi = &peer->bufq_info;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002515
Dhanashri Atre50141c52016-04-07 13:15:29 -07002516 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
Dhanashri Atre182b0272016-02-17 15:35:07 -08002517 data_rx = peer->vdev->rx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002518 else
2519 drop = true;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302520 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002521
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002522 qdf_spin_lock_bh(&bufqi->bufq_lock);
2523 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002524 typeof(*cache_buf), list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002525 while (!list_empty(&bufqi->cached_bufq)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002526 list_del(&cache_buf->list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002527 bufqi->curr--;
2528 qdf_assert(bufqi->curr >= 0);
2529 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002530 if (drop) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302531 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002532 } else {
2533 /* Flush the cached frames to HDD */
Dhanashri Atre182b0272016-02-17 15:35:07 -08002534 ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302535 if (ret != QDF_STATUS_SUCCESS)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302536 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002537 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302538 qdf_mem_free(cache_buf);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002539 qdf_spin_lock_bh(&bufqi->bufq_lock);
2540 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002541 typeof(*cache_buf), list);
2542 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002543 bufqi->qdepth_no_thresh = bufqi->curr;
2544 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302545 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002546}
2547
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07002548static void ol_txrx_flush_cache_rx_queue(void)
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302549{
2550 uint8_t sta_id;
2551 struct ol_txrx_peer_t *peer;
2552 struct ol_txrx_pdev_t *pdev;
2553
2554 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
2555 if (!pdev)
2556 return;
2557
2558 for (sta_id = 0; sta_id < WLAN_MAX_STA_COUNT; sta_id++) {
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002559 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
2560 sta_id);
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302561 if (!peer)
2562 continue;
2563 ol_txrx_flush_rx_frames(peer, 1);
2564 }
2565}
2566
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302567/* Define short name to use in cds_trigger_recovery */
2568#define PEER_DEL_TIMEOUT QDF_PEER_DELETION_TIMEDOUT
2569
Dhanashri Atre12a08392016-02-17 13:10:34 -08002570/**
Naveen Rawat17c42a82018-02-01 19:18:27 -08002571 * ol_txrx_dump_peer_access_list() - dump peer access list
2572 * @peer: peer handle
2573 *
2574 * This function dumps any peer debug ids that still hold references to the peer
2575 *
2576 * Return: None
2577 */
2578static void ol_txrx_dump_peer_access_list(ol_txrx_peer_handle peer)
2579{
2580 u32 i;
2581 u32 pending_ref;
2582
2583 for (i = 0; i < PEER_DEBUG_ID_MAX; i++) {
2584 pending_ref = qdf_atomic_read(&peer->access_list[i]);
2585 if (pending_ref)
2586 ol_txrx_info_high("id %d pending refs %d",
2587 i, pending_ref);
2588 }
2589}
2590
2591/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08002592 * ol_txrx_peer_attach - Allocate and set up references for a
2593 * data peer object.
2594 * @data_pdev: data physical device object that will indirectly
2595 * own the data_peer object
2596 * @data_vdev: data virtual device object that will directly
2597 * own the data_peer object
2598 * @peer_mac_addr: MAC address of the new peer
2599 *
2600 * When an association with a peer starts, the host's control SW
2601 * uses this function to inform the host data SW.
2602 * The host data SW allocates its own peer object, and stores a
2603 * reference to the control peer object within the data peer object.
2604 * The host data SW also stores a reference to the virtual device
2605 * that the peer is associated with. This virtual device handle is
2606 * used when the data SW delivers rx data frames to the OS shim layer.
2607 * The host data SW returns a handle to the new peer data object,
2608 * so a reference within the control peer object can be set to the
2609 * data peer object.
2610 *
2611 * Return: handle to new data peer object, or NULL if the attach
2612 * fails
2613 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002614static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002615ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002616{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002617 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002618 struct ol_txrx_peer_t *peer;
2619 struct ol_txrx_peer_t *temp_peer;
2620 uint8_t i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002621 bool wait_on_deletion = false;
2622 unsigned long rc;
Dhanashri Atre12a08392016-02-17 13:10:34 -08002623 struct ol_txrx_pdev_t *pdev;
Abhishek Singh217d9782017-04-28 23:49:11 +05302624 bool cmp_wait_mac = false;
2625 uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002626
2627 /* preconditions */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002628 TXRX_ASSERT2(vdev);
2629 TXRX_ASSERT2(peer_mac_addr);
2630
Dhanashri Atre12a08392016-02-17 13:10:34 -08002631 pdev = vdev->pdev;
2632 TXRX_ASSERT2(pdev);
2633
Abhishek Singh217d9782017-04-28 23:49:11 +05302634 if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
2635 QDF_MAC_ADDR_SIZE))
2636 cmp_wait_mac = true;
2637
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302638 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002639 /* check for duplicate existing peer */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002640 TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2641 if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
2642 (union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302643 ol_txrx_info_high(
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002644 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exists.\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002645 vdev->vdev_id,
2646 peer_mac_addr[0], peer_mac_addr[1],
2647 peer_mac_addr[2], peer_mac_addr[3],
2648 peer_mac_addr[4], peer_mac_addr[5]);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302649 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002650 vdev->wait_on_peer_id = temp_peer->local_id;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002651 qdf_event_reset(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002652 wait_on_deletion = true;
Abhishek Singh217d9782017-04-28 23:49:11 +05302653 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002654 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302655 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002656 return NULL;
2657 }
2658 }
Abhishek Singh217d9782017-04-28 23:49:11 +05302659 if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
2660 &temp_peer->mac_addr,
2661 &vdev->last_peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302662 ol_txrx_info_high(
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002663 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) old peer exists.\n",
Abhishek Singh217d9782017-04-28 23:49:11 +05302664 vdev->vdev_id,
2665 vdev->last_peer_mac_addr.raw[0],
2666 vdev->last_peer_mac_addr.raw[1],
2667 vdev->last_peer_mac_addr.raw[2],
2668 vdev->last_peer_mac_addr.raw[3],
2669 vdev->last_peer_mac_addr.raw[4],
2670 vdev->last_peer_mac_addr.raw[5]);
2671 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2672 vdev->wait_on_peer_id = temp_peer->local_id;
2673 qdf_event_reset(&vdev->wait_delete_comp);
2674 wait_on_deletion = true;
2675 break;
2676 } else {
2677 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2678 ol_txrx_err("peer not found");
2679 return NULL;
2680 }
2681 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002682 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302683 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002684
Abhishek Singh217d9782017-04-28 23:49:11 +05302685 qdf_mem_zero(&vdev->last_peer_mac_addr,
2686 sizeof(union ol_txrx_align_mac_addr_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002687 if (wait_on_deletion) {
2688 /* wait for peer deletion */
Nachiket Kukade0396b732017-11-14 16:35:16 +05302689 rc = qdf_wait_for_event_completion(&vdev->wait_delete_comp,
Prakash Manjunathappad3ccca22016-05-05 19:23:19 -07002690 PEER_DELETION_TIMEOUT);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002691 if (QDF_STATUS_SUCCESS != rc) {
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002692 ol_txrx_err("error waiting for peer_id(%d) deletion, status %d\n",
Dustin Brown100201e2017-07-10 11:48:40 -07002693 vdev->wait_on_peer_id, (int) rc);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002694 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08002695 ol_txrx_dump_peer_access_list(temp_peer);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002696 wlan_roam_debug_dump_table();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002697 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Dustin Brown100201e2017-07-10 11:48:40 -07002698
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002699 return NULL;
2700 }
2701 }
2702
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302703 peer = qdf_mem_malloc(sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002704 if (!peer)
2705 return NULL; /* failure */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002706
2707 /* store provided params */
2708 peer->vdev = vdev;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302709 qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002710 OL_TXRX_MAC_ADDR_LEN);
2711
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302712 ol_txrx_peer_txqs_init(pdev, peer);
2713
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002714 INIT_LIST_HEAD(&peer->bufq_info.cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302715 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002716 /* add this peer into the vdev's list */
2717 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302718 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002719 /* check whether this is a real peer (peer mac addr != vdev mac addr) */
Frank Liu4362e462018-01-16 11:51:55 +08002720 if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr)) {
2721 qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002722 vdev->last_real_peer = peer;
Frank Liu4362e462018-01-16 11:51:55 +08002723 qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
2724 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002725
2726 peer->rx_opt_proc = pdev->rx_opt_proc;
2727
2728 ol_rx_peer_init(pdev, peer);
2729
2730 /* initialize the peer_id */
2731 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2732 peer->peer_ids[i] = HTT_INVALID_PEER;
2733
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302734 qdf_spinlock_create(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002735 qdf_spinlock_create(&peer->bufq_info.bufq_lock);
2736
2737 peer->bufq_info.thresh = OL_TXRX_CACHED_BUFQ_THRESH;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002738
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302739 qdf_atomic_init(&peer->delete_in_progress);
2740 qdf_atomic_init(&peer->flush_in_progress);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302741 qdf_atomic_init(&peer->ref_cnt);
Mohit Khannab7bec722017-11-10 11:43:44 -08002742
2743 for (i = 0; i < PEER_DEBUG_ID_MAX; i++)
2744 qdf_atomic_init(&peer->access_list[i]);
2745
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002746 /* keep one reference for attach */
Mohit Khannab7bec722017-11-10 11:43:44 -08002747 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002748
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002749 /* Set a flag to indicate peer create is pending in firmware */
Prakash Dhavali0d3f1d62016-11-20 23:48:24 -08002750 qdf_atomic_init(&peer->fw_create_pending);
2751 qdf_atomic_set(&peer->fw_create_pending, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002752
2753 peer->valid = 1;
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07002754 qdf_timer_init(pdev->osdev, &peer->peer_unmap_timer,
2755 peer_unmap_timer_handler, peer, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002756
2757 ol_txrx_peer_find_hash_add(pdev, peer);
2758
Mohit Khanna47384bc2016-08-15 15:37:05 -07002759 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002760 "vdev %pK created peer %pK ref_cnt %d (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Mohit Khanna47384bc2016-08-15 15:37:05 -07002761 vdev, peer, qdf_atomic_read(&peer->ref_cnt),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002762 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2763 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2764 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2765 /*
2766 * For every peer MAP message, check whether this is the bss peer and set bss_peer
2767 */
Ankit Guptaa5076012016-09-14 11:32:19 -07002768 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
2769 OL_TXRX_MAC_ADDR_LEN))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002770 peer->bss_peer = 1;
2771
2772 /*
2773 * The peer starts in the "disc" state while association is in progress.
2774 * Once association completes, the peer will get updated to "auth" state
2775 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2776 * or else to the "conn" state. For non-open mode, the peer will
2777 * progress to "auth" state once the authentication completes.
2778 */
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002779 peer->state = OL_TXRX_PEER_STATE_INVALID;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002780 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002781 OL_TXRX_PEER_STATE_DISC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002782
2783#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2784 peer->rssi_dbm = HTT_RSSI_INVALID;
2785#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002786 if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2787 !pdev->self_peer) {
2788 pdev->self_peer = peer;
2789 /*
2790 * No Tx in monitor mode, otherwise results in target assert.
2791 * Setting disable_intrabss_fwd to true
2792 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002793 ol_vdev_rx_set_intrabss_fwd((struct cdp_vdev *)vdev, true);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002794 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002795
2796 ol_txrx_local_peer_id_alloc(pdev, peer);
2797
Leo Chang98726762016-10-28 11:07:18 -07002798 return (void *)peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002799}
2800
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302801#undef PEER_DEL_TIMEOUT
2802
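/*
 * Illustrative attach sequence (hypothetical sketch; the real callers live
 * in the control/WMA layer and may differ - vdev, pdev and sta_mac are
 * assumed locals):
 *
 *     void *peer = ol_txrx_peer_attach((struct cdp_vdev *)vdev, sta_mac);
 *
 *     if (peer)
 *             ol_txrx_peer_state_update((struct cdp_pdev *)pdev, sta_mac,
 *                                       OL_TXRX_PEER_STATE_CONN);
 */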
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002803/*
2804 * Discarding tx filter - removes all data frames (disconnected state)
2805 */
2806static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2807{
2808 return A_ERROR;
2809}
2810
2811/*
2812 * Non-authentication tx filter - filters out data frames that are not
2813 * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2814 * data frames (connected state)
2815 */
2816static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2817{
2818 return
2819 (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2820 tx_msdu_info->htt.info.ethertype ==
2821 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2822}
2823
2824/*
2825 * Pass-through tx filter - lets all data frames through (authenticated state)
2826 */
2827static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2828{
2829 return A_OK;
2830}
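/*
 * Note: ol_txrx_peer_state_update() below installs one of the three
 * filters above as peer->tx_filter - pass_thru for the "auth" state,
 * non_auth for the "conn" state, and discard otherwise.
 */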
2831
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002832/**
2833 * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2834 * @peer: handle to peer
2835 *
2836 * Returns the MAC address for modules that do not know the peer type
2837 *
2838 * Return: the mac_addr from peer
2839 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002840static uint8_t *
Leo Chang98726762016-10-28 11:07:18 -07002841ol_txrx_peer_get_peer_mac_addr(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002842{
Leo Chang98726762016-10-28 11:07:18 -07002843 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002844
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002845 if (!peer)
2846 return NULL;
2847
2848 return peer->mac_addr.raw;
2849}
2850
Abhishek Singhcfb44482017-03-10 12:42:37 +05302851#ifdef WLAN_FEATURE_11W
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002852/**
2853 * ol_txrx_get_pn_info() - Returns pn info from peer
2854 * @peer: handle to peer
2855 * @last_pn_valid: return last_rmf_pn_valid value from peer.
2856 * @last_pn: return last_rmf_pn value from peer.
2857 * @rmf_pn_replays: return rmf_pn_replays value from peer.
2858 *
2859 * Return: NONE
2860 */
2861void
Leo Chang98726762016-10-28 11:07:18 -07002862ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002863 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2864{
Leo Chang98726762016-10-28 11:07:18 -07002865 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002866 *last_pn_valid = &peer->last_rmf_pn_valid;
2867 *last_pn = &peer->last_rmf_pn;
2868 *rmf_pn_replays = &peer->rmf_pn_replays;
2869}
Abhishek Singhcfb44482017-03-10 12:42:37 +05302870#else
2871void
2872ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
2873 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2874{
2875}
2876#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002877
2878/**
2879 * ol_txrx_get_opmode() - Return operation mode of vdev
2880 * @vdev: vdev handle
2881 *
2882 * Return: operation mode.
2883 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002884static int ol_txrx_get_opmode(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002885{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002886 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002887
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002888 return vdev->opmode;
2889}
2890
2891/**
2892 * ol_txrx_get_peer_state() - Return peer state of peer
2893 * @peer: peer handle
2894 *
2895 * Return: return peer state
2896 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002897static int ol_txrx_get_peer_state(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002898{
Leo Chang98726762016-10-28 11:07:18 -07002899 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002900
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002901 return peer->state;
2902}
2903
2904/**
2905 * ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
2906 * @peer: peer handle
2907 *
2908 * Return: vdev handle from peer
2909 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002910static struct cdp_vdev *ol_txrx_get_vdev_for_peer(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002911{
Leo Chang98726762016-10-28 11:07:18 -07002912 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002913
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002914 return (struct cdp_vdev *)peer->vdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002915}
2916
2917/**
2918 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
2919 * @vdev: vdev handle
2920 *
2921 * Return: vdev mac address
2922 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002923static uint8_t *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002924ol_txrx_get_vdev_mac_addr(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002925{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002926 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002927
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002928 if (!vdev)
2929 return NULL;
2930
2931 return vdev->mac_addr.raw;
2932}
2933
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002934#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002935/**
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002936 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002937 * vdev
2938 * @vdev: vdev handle
2939 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002940 * Return: Handle to struct qdf_mac_addr
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002941 */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002942struct qdf_mac_addr *
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002943ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
2944{
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002945 return (struct qdf_mac_addr *)&(vdev->mac_addr);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002946}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002947#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002948
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002949#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002950/**
2951 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
2952 * @vdev: vdev handle
2953 *
2954 * Return: Handle to pdev
2955 */
2956ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
2957{
2958 return vdev->pdev;
2959}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002960#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002961
2962/**
2963 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
2964 * @vdev: vdev handle
2965 *
2966 * Return: Handle to control pdev
2967 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002968static struct cdp_cfg *
2969ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002970{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002971 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002972
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002973 return vdev->pdev->ctrl_pdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002974}
2975
2976/**
2977 * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
2978 * @vdev: vdev handle
2979 *
2980 * Return: Rx Fwd disabled status
2981 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002982static uint8_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002983ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002984{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002985 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002986 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
2987 vdev->pdev->ctrl_pdev;
2988 return cfg->rx_fwd_disabled;
2989}
2990
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002991#ifdef QCA_IBSS_SUPPORT
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002992/**
2993 * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
2994 * @vdev: vdev handle
2995 * @peer_num_delta: adjustment to apply to the vdev's IBSS peer count
2996 *
2997 * Return: OL_TXRX_INVALID_NUM_PEERS on failure, or the peer count after adjustment.
2998 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002999static int16_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003000ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003001 int16_t peer_num_delta)
3002{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003003 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003004 int16_t new_peer_num;
3005
3006 new_peer_num = vdev->ibss_peer_num + peer_num_delta;
Naveen Rawatc45d1622016-07-05 12:20:09 -07003007 if (new_peer_num > MAX_PEERS || new_peer_num < 0)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003008 return OL_TXRX_INVALID_NUM_PEERS;
3009
3010 vdev->ibss_peer_num = new_peer_num;
3011
3012 return new_peer_num;
3013}
3014
3015/**
3016 * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
3017 * beat timer
3018 * @vdev: vdev handle
3019 * @timer_value_sec: new heart beat timer value
3020 *
3021 * Return: Old timer value set in vdev.
3022 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003023static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
3024 uint16_t timer_value_sec)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003025{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003026 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003027 uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
3028
3029 vdev->ibss_peer_heart_beat_timer = timer_value_sec;
3030
3031 return old_timer_value;
3032}
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07003033#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003034
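/*
 * Note: the remove-peers helpers below invoke the supplied
 * ol_txrx_vdev_peer_remove_cb as
 *
 *     callback(callback_context, peer_mac_raw, vdev_id, peer);
 *
 * where peer_mac_raw is the peer's raw MAC address (or the vdev's MAC
 * address for the last/self peer).
 */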
3035/**
3036 * ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
3037 * @vdev: vdev handle
3038 * @callback: callback function to remove the peer.
3039 * @callback_context: handle for callback function
3040 * @remove_last_peer: Whether the last peer should also be removed.
3041 *
3042 * Return: NONE
3043 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003044static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003045ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003046 ol_txrx_vdev_peer_remove_cb callback,
3047 void *callback_context, bool remove_last_peer)
3048{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003049 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003050 ol_txrx_peer_handle peer, temp;
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07003051 int self_removed = 0;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003052 /* remove all remote peers for vdev */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003053 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003054
3055 temp = NULL;
3056 TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
3057 peer_list_elem) {
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05303058 if (qdf_atomic_read(&peer->delete_in_progress))
3059 continue;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003060 if (temp) {
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003061 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05303062 callback(callback_context, temp->mac_addr.raw,
Jiachao Wu641760e2018-01-21 12:11:31 +08003063 vdev->vdev_id, temp);
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003064 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003065 }
3066 /* self peer is deleted last */
3067 if (peer == TAILQ_FIRST(&vdev->peer_list)) {
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07003068 self_removed = 1;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003069 break;
Yun Parkeaea8632017-04-09 09:53:45 -07003070 }
3071 temp = peer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003072 }
3073
Mohit Khanna137b97d2016-04-21 16:11:33 -07003074 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
3075
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07003076 if (self_removed)
3077 ol_txrx_info("%s: self peer removed by caller ",
3078 __func__);
3079
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003080 if (remove_last_peer) {
3081 /* remove IBSS bss peer last */
3082 peer = TAILQ_FIRST(&vdev->peer_list);
3083 callback(callback_context, (uint8_t *) &vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08003084 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003085 }
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003086}
3087
3088/**
3089 * ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
3090 * @vdev: vdev handle
3091 * @callback: callback function to remove the peer.
3092 * @callback_context: handle for callback function
3093 *
3094 * Return: NONE
3095 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003096static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003097ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003098 ol_txrx_vdev_peer_remove_cb callback,
3099 void *callback_context)
3100{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003101 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003102 ol_txrx_peer_handle peer = NULL;
Jiachao Wu641760e2018-01-21 12:11:31 +08003103 ol_txrx_peer_handle tmp_peer = NULL;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003104
Jiachao Wu641760e2018-01-21 12:11:31 +08003105 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, peer_list_elem, tmp_peer) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05303106 ol_txrx_info_high(
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003107 "%s: peer found for vdev id %d. deleting the peer",
3108 __func__, vdev->vdev_id);
3109 callback(callback_context, (uint8_t *)&vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08003110 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003111 }
3112}
3113
3114/**
3115 * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
3116 * @vdev: vdev handle
3117 * @ocb_set_chan: OCB channel information to be set in vdev.
3118 *
3119 * Return: NONE
3120 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003121static void ol_txrx_set_ocb_chan_info(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003122 struct ol_txrx_ocb_set_chan ocb_set_chan)
3123{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003124 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003125
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003126 vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
3127 vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
3128}
3129
3130/**
3131 * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
3132 * @vdev: vdev handle
3133 *
3134 * Return: handle to struct ol_txrx_ocb_chan_info
3135 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003136static struct ol_txrx_ocb_chan_info *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003137ol_txrx_get_ocb_chan_info(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003138{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003139 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003140
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003141 return vdev->ocb_channel_info;
3142}
3143
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003144/**
3145 * @brief specify the peer's authentication state
3146 * @details
3147 * Specify the peer's authentication state (none, connected, authenticated)
3148 * to allow the data SW to determine whether to filter out invalid data frames.
3149 * (In the "connected" state, where security is enabled, but authentication
3150 * has not completed, tx and rx data frames other than EAPOL or WAPI should
3151 * be discarded.)
3152 * This function is only relevant for systems in which the tx and rx filtering
3153 * are done in the host rather than in the target.
3154 *
3155 * @param data_peer - which peer has changed its state
3156 * @param state - the new state of the peer
3157 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003158 * Return: QDF Status
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003159 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003160QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003161 uint8_t *peer_mac,
3162 enum ol_txrx_peer_state state)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003163{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003164 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003165 struct ol_txrx_peer_t *peer;
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003166 int peer_ref_cnt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003167
Anurag Chouhanc5548422016-02-24 18:33:27 +05303168 if (qdf_unlikely(!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303169 ol_txrx_err("Pdev is NULL");
Anurag Chouhanc5548422016-02-24 18:33:27 +05303170 qdf_assert(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303171 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003172 }
3173
Mohit Khannab7bec722017-11-10 11:43:44 -08003174 peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
3175 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003176 if (NULL == peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303177 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303178 "%s: peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
3179 __func__,
3180 peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
3181 peer_mac[4], peer_mac[5]);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303182 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003183 }
3184
3185 /* TODO: Should we send a WMI command for the connection state? */
3186 /* avoid multiple auth state changes. */
3187 if (peer->state == state) {
3188#ifdef TXRX_PRINT_VERBOSE_ENABLE
Poddar, Siddarth14521792017-03-14 21:19:42 +05303189 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003190 "%s: no state change, returns directly\n",
3191 __func__);
3192#endif
Mohit Khannab7bec722017-11-10 11:43:44 -08003193 peer_ref_cnt = ol_txrx_peer_release_ref
3194 (peer,
3195 PEER_DEBUG_ID_OL_INTERNAL);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303196 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003197 }
3198
Poddar, Siddarth14521792017-03-14 21:19:42 +05303199 ol_txrx_dbg("%s: change from %d to %d\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003200 __func__, peer->state, state);
3201
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003202 peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003203 ? ol_tx_filter_pass_thru
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003204 : ((state == OL_TXRX_PEER_STATE_CONN)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003205 ? ol_tx_filter_non_auth
3206 : ol_tx_filter_discard);
3207
3208 if (peer->vdev->pdev->cfg.host_addba) {
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003209 if (state == OL_TXRX_PEER_STATE_AUTH) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003210 int tid;
3211 /*
3212 * Pause all regular (non-extended) TID tx queues until
3213 * data arrives and ADDBA negotiation has completed.
3214 */
Poddar, Siddarth14521792017-03-14 21:19:42 +05303215 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003216 "%s: pause peer and unpause mgmt/non-qos\n",
3217 __func__);
3218 ol_txrx_peer_pause(peer); /* pause all tx queues */
3219 /* unpause mgmt and non-QoS tx queues */
3220 for (tid = OL_TX_NUM_QOS_TIDS;
3221 tid < OL_TX_NUM_TIDS; tid++)
3222 ol_txrx_peer_tid_unpause(peer, tid);
3223 }
3224 }
Mohit Khannab7bec722017-11-10 11:43:44 -08003225 peer_ref_cnt = ol_txrx_peer_release_ref(peer,
3226 PEER_DEBUG_ID_OL_INTERNAL);
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003227 /*
Mohit Khannab7bec722017-11-10 11:43:44 -08003228 * after ol_txrx_peer_release_ref, peer object cannot be accessed
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003229 * if the return code was 0
3230 */
Mohit Khannab04dfcd2017-02-13 18:54:35 -08003231 if (peer_ref_cnt > 0)
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003232 /*
3233 * Set the state after the Pause to avoid the race condition
3234 * with ADDBA check in tx path
3235 */
3236 peer->state = state;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303237 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003238}
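/*
 * Illustrative state progression (an assumed typical sequence; the actual
 * transitions are driven by the control path):
 *
 *     ol_txrx_peer_state_update(pdev, mac, OL_TXRX_PEER_STATE_DISC);
 *     ol_txrx_peer_state_update(pdev, mac, OL_TXRX_PEER_STATE_CONN);
 *     ol_txrx_peer_state_update(pdev, mac, OL_TXRX_PEER_STATE_AUTH);
 *
 * Open-mode peers may move straight from DISC to AUTH.
 */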
3239
3240void
3241ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
3242{
3243 peer->keyinstalled = val;
3244}
3245
3246void
3247ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
3248 uint8_t *peer_mac,
3249 union ol_txrx_peer_update_param_t *param,
3250 enum ol_txrx_peer_update_select_t select)
3251{
3252 struct ol_txrx_peer_t *peer;
3253
Mohit Khannab7bec722017-11-10 11:43:44 -08003254 peer = ol_txrx_peer_find_hash_find_get_ref(vdev->pdev, peer_mac, 0, 1,
3255 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003256 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303257 ol_txrx_dbg("%s: peer is null",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003258 __func__);
3259 return;
3260 }
3261
3262 switch (select) {
3263 case ol_txrx_peer_update_qos_capable:
3264 {
3265 /* Save qos_capable in the txrx peer here; it is also saved
3266 * when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO arrives.
3267 */
3268 peer->qos_capable = param->qos_capable;
3269 /*
3270 * The following function call assumes that the peer has a
3271 * single ID. This is currently true, and
3272 * is expected to remain true.
3273 */
3274 htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
3275 peer->peer_ids[0],
3276 peer->qos_capable);
3277 break;
3278 }
3279 case ol_txrx_peer_update_uapsdMask:
3280 {
3281 peer->uapsd_mask = param->uapsd_mask;
3282 htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
3283 peer->peer_ids[0],
3284 peer->uapsd_mask);
3285 break;
3286 }
3287 case ol_txrx_peer_update_peer_security:
3288 {
3289 enum ol_sec_type sec_type = param->sec_type;
3290 enum htt_sec_type peer_sec_type = htt_sec_type_none;
3291
3292 switch (sec_type) {
3293 case ol_sec_type_none:
3294 peer_sec_type = htt_sec_type_none;
3295 break;
3296 case ol_sec_type_wep128:
3297 peer_sec_type = htt_sec_type_wep128;
3298 break;
3299 case ol_sec_type_wep104:
3300 peer_sec_type = htt_sec_type_wep104;
3301 break;
3302 case ol_sec_type_wep40:
3303 peer_sec_type = htt_sec_type_wep40;
3304 break;
3305 case ol_sec_type_tkip:
3306 peer_sec_type = htt_sec_type_tkip;
3307 break;
3308 case ol_sec_type_tkip_nomic:
3309 peer_sec_type = htt_sec_type_tkip_nomic;
3310 break;
3311 case ol_sec_type_aes_ccmp:
3312 peer_sec_type = htt_sec_type_aes_ccmp;
3313 break;
3314 case ol_sec_type_wapi:
3315 peer_sec_type = htt_sec_type_wapi;
3316 break;
3317 default:
3318 peer_sec_type = htt_sec_type_none;
3319 break;
3320 }
3321
3322 peer->security[txrx_sec_ucast].sec_type =
3323 peer->security[txrx_sec_mcast].sec_type =
3324 peer_sec_type;
3325
3326 break;
3327 }
3328 default:
3329 {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303330 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003331 "ERROR: unknown param %d in %s", select,
3332 __func__);
3333 break;
3334 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003335 } /* switch */
Mohit Khannab7bec722017-11-10 11:43:44 -08003336 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003337}
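/*
 * Illustrative caller sketch for ol_txrx_peer_update() (hypothetical; the
 * real callers live in the control path):
 *
 *     union ol_txrx_peer_update_param_t param;
 *
 *     param.qos_capable = 1;
 *     ol_txrx_peer_update(vdev, peer_mac, &param,
 *                         ol_txrx_peer_update_qos_capable);
 */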
3338
3339uint8_t
3340ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3341{
3342
3343 struct ol_txrx_peer_t *peer;
Yun Parkeaea8632017-04-09 09:53:45 -07003344
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003345 peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3346 if (peer)
3347 return peer->uapsd_mask;
3348 return 0;
3349}
3350
3351uint8_t
3352ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3353{
3354
3355 struct ol_txrx_peer_t *peer_t =
3356 ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3357 if (peer_t != NULL)
3358 return peer_t->qos_capable;
3359 return 0;
3360}
3361
Mohit Khannab7bec722017-11-10 11:43:44 -08003362/**
Mohit Khannab7bec722017-11-10 11:43:44 -08003363 * ol_txrx_peer_free_tids() - free tids for the peer
3364 * @peer: peer handle
3365 *
3366 * Return: None
3367 */
3368static inline void ol_txrx_peer_free_tids(ol_txrx_peer_handle peer)
3369{
3370 int i = 0;
3371 /*
3372 * 'array' is allocated in the addba handler and is supposed to be
3373 * freed in the delba handler. There are cases (for example, in
3374 * SSR) where the delba handler is not called. Because 'array' points
3375 * to the address of 'base' by default and is only reallocated in the
3376 * addba handler later, free the memory only when 'array' does not
3377 * point to 'base'.
3378 */
3379 for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
3380 if (peer->tids_rx_reorder[i].array !=
3381 &peer->tids_rx_reorder[i].base) {
3382 ol_txrx_dbg(
3383 "%s, delete reorder arr, tid:%d\n",
3384 __func__, i);
3385 qdf_mem_free(peer->tids_rx_reorder[i].array);
3386 ol_rx_reorder_init(&peer->tids_rx_reorder[i],
3387 (uint8_t)i);
3388 }
3389 }
3390}
3391
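/**
 * ol_txrx_is_peer_eligible_for_deletion() - check whether peer can be freed
 * @peer: peer handle
 * @pdev: pdev handle
 *
 * A peer may be freed immediately only if none of its peer ids still has a
 * pending del_peer_id_ref_cnt in the peer_id_to_obj_map; otherwise the peer
 * must be kept as a stale entry until the PEER UNMAP arrives.
 *
 * Return: true if the peer can be freed now, false otherwise
 */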
Alok Kumar4d87ff22018-06-01 17:15:57 +05303392bool ol_txrx_is_peer_eligible_for_deletion(ol_txrx_peer_handle peer,
3393 struct ol_txrx_pdev_t *pdev)
Alok Kumarbda73bb2018-05-17 11:50:03 +05303394{
Alok Kumarbda73bb2018-05-17 11:50:03 +05303395 bool peerdel = true;
3396 u_int16_t peer_id;
3397 int i;
3398
Alok Kumarbda73bb2018-05-17 11:50:03 +05303399 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
3400 peer_id = peer->peer_ids[i];
3401
3402 if (!pdev->peer_id_to_obj_map[peer_id].peer_ref)
3403 continue;
3404
3405 if (pdev->peer_id_to_obj_map[peer_id].peer_ref != peer)
3406 continue;
3407
3408 if (qdf_atomic_read(&pdev->peer_id_to_obj_map[peer_id].
3409 del_peer_id_ref_cnt)) {
3410 peerdel = false;
3411 break;
3412 }
3413
3414 pdev->peer_id_to_obj_map[peer_id].peer_ref = NULL;
3415 }
3416 return peerdel;
3417}
3418
Mohit Khannab7bec722017-11-10 11:43:44 -08003419/**
3420 * ol_txrx_peer_release_ref() - release peer reference
3421 * @peer: peer handle
3422 *
3423 * Release peer reference and delete peer if refcount is 0
3424 *
wadesong9f2b1102017-12-20 22:58:35 +08003425 * Return: Resulting peer ref_cnt after this function is invoked
Mohit Khannab7bec722017-11-10 11:43:44 -08003426 */
3427int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
3428 enum peer_debug_id_type debug_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003429{
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003430 int rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003431 struct ol_txrx_vdev_t *vdev;
3432 struct ol_txrx_pdev_t *pdev;
Jingxiang Ge3badb982018-01-02 17:39:01 +08003433 bool ref_silent = false;
Jingxiang Ge190679b2018-01-30 08:56:19 +08003434 int access_list = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003435
3436 /* preconditions */
3437 TXRX_ASSERT2(peer);
3438
3439 vdev = peer->vdev;
3440 if (NULL == vdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303441 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003442 "The vdev is not present anymore\n");
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003443 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003444 }
3445
3446 pdev = vdev->pdev;
3447 if (NULL == pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303448 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003449 "The pdev is not present anymore\n");
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003450 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003451 }
3452
Mohit Khannab7bec722017-11-10 11:43:44 -08003453 if (debug_id >= PEER_DEBUG_ID_MAX || debug_id < 0) {
3454 ol_txrx_err("incorrect debug_id %d ", debug_id);
3455 return -EINVAL;
3456 }
3457
Jingxiang Ge3badb982018-01-02 17:39:01 +08003458 if (debug_id == PEER_DEBUG_ID_OL_RX_THREAD)
3459 ref_silent = true;
3460
3461 if (!ref_silent)
3462 wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
3463 DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
3464 peer, 0,
3465 qdf_atomic_read(&peer->ref_cnt));
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003466
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003467
3468 /*
3469 * Hold the lock all the way from checking if the peer ref count
3470 * is zero until the peer references are removed from the hash
3471 * table and vdev list (if the peer ref count is zero).
3472 * This protects against a new HL tx operation starting to use the
3473 * peer object just after this function concludes it's done being used.
3474 * Furthermore, the lock needs to be held while checking whether the
3475 * vdev's list of peers is empty, to make sure that list is not modified
3476 * concurrently with the empty check.
3477 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303478 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003479
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003480 /*
3481 * Check for the reference count before deleting the peer
3482 * as we have seen cases where this function is re-entered,
3483 * which leads to a deadlock.
3484 * (A double-free should never happen, so assert if it does.)
3485 */
3486 rc = qdf_atomic_read(&(peer->ref_cnt));
3487
3488 if (rc == 0) {
3489 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3490 ol_txrx_err("The Peer is not present anymore\n");
3491 qdf_assert(0);
3492 return -EACCES;
3493 }
3494 /*
3495 * now decrement rc; this will be the return code.
3496 * 0 : peer deleted
3497 * >0: peer ref removed, but still has other references
3498 * <0: sanity failed - no changes to the state of the peer
3499 */
3500 rc--;
3501
Mohit Khannab7bec722017-11-10 11:43:44 -08003502 if (!qdf_atomic_read(&peer->access_list[debug_id])) {
3503 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
jitiphil8ad8a6f2018-03-01 23:45:05 +05303504 ol_txrx_err("peer %pK ref was not taken by %d",
Mohit Khannab7bec722017-11-10 11:43:44 -08003505 peer, debug_id);
3506 ol_txrx_dump_peer_access_list(peer);
3507 QDF_BUG(0);
3508 return -EACCES;
3509 }
Mohit Khannab7bec722017-11-10 11:43:44 -08003510 qdf_atomic_dec(&peer->access_list[debug_id]);
3511
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07003512 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
Mohit Khannab7bec722017-11-10 11:43:44 -08003513 u16 peer_id;
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003514 wlan_roam_debug_log(vdev->vdev_id,
3515 DEBUG_DELETING_PEER_OBJ,
3516 DEBUG_INVALID_PEER_ID,
3517 &peer->mac_addr.raw, peer, 0,
3518 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003519 peer_id = peer->local_id;
3520 /* remove the reference to the peer from the hash table */
3521 ol_txrx_peer_find_hash_remove(pdev, peer);
3522
3523 /* remove the peer from its parent vdev's list */
3524 TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
3525
3526 /* cleanup the Rx reorder queues for this peer */
3527 ol_rx_peer_cleanup(vdev, peer);
3528
Jingxiang Ge3badb982018-01-02 17:39:01 +08003529 qdf_spinlock_destroy(&peer->peer_info_lock);
3530 qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
3531
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003532 /* peer is removed from peer_list */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303533 qdf_atomic_set(&peer->delete_in_progress, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003534
3535 /*
3536 * Set wait_delete_comp event if the current peer id matches
3537 * with registered peer id.
3538 */
3539 if (peer_id == vdev->wait_on_peer_id) {
Anurag Chouhance0dc992016-02-16 18:18:03 +05303540 qdf_event_set(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003541 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3542 }
3543
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07003544 qdf_timer_sync_cancel(&peer->peer_unmap_timer);
3545 qdf_timer_free(&peer->peer_unmap_timer);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003546
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003547 /* check whether the parent vdev has no peers left */
3548 if (TAILQ_EMPTY(&vdev->peer_list)) {
3549 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003550 * Check if the parent vdev was waiting for its peers
3551 * to be deleted, in order for it to be deleted too.
3552 */
3553 if (vdev->delete.pending) {
3554 ol_txrx_vdev_delete_cb vdev_delete_cb =
3555 vdev->delete.callback;
3556 void *vdev_delete_context =
3557 vdev->delete.context;
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303558 /*
3559 * Now that there are no references to the peer,
3560 * we can release the peer reference lock.
3561 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303562 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303563
gbian016a42e2017-03-01 18:49:11 +08003564 /*
Yun Parkeaea8632017-04-09 09:53:45 -07003565 * ol_tx_desc_free might access invalid vdev
3566 * content through a tx desc, since this vdev
3567 * might be detached asynchronously in another
3568 * thread.
3569 *
3570 * Go through the tx desc pool and set each
3571 * tx desc's vdev to NULL when detaching this vdev,
3572 * and add a vdev check in ol_tx_desc_free
3573 * to avoid a crash.
3574 */
gbian016a42e2017-03-01 18:49:11 +08003575 ol_txrx_tx_desc_reset_vdev(vdev);
Poddar, Siddarth14521792017-03-14 21:19:42 +05303576 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003577 "%s: deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) - its last peer is done",
Yun Parkeaea8632017-04-09 09:53:45 -07003578 __func__, vdev,
3579 vdev->mac_addr.raw[0],
3580 vdev->mac_addr.raw[1],
3581 vdev->mac_addr.raw[2],
3582 vdev->mac_addr.raw[3],
3583 vdev->mac_addr.raw[4],
3584 vdev->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003585 /* all peers are gone, go ahead and delete it */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303586 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003587 if (vdev_delete_cb)
3588 vdev_delete_cb(vdev_delete_context);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303589 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303590 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003591 }
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303592 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303593 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303594 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003595
jitiphil8ad8a6f2018-03-01 23:45:05 +05303596 ol_txrx_info_high("[%d][%d]: Deleting peer %pK ref_cnt -> %d %s",
Mohit Khannab7bec722017-11-10 11:43:44 -08003597 debug_id,
3598 qdf_atomic_read(&peer->access_list[debug_id]),
3599 peer, rc,
3600 qdf_atomic_read(&peer->fw_create_pending)
3601 == 1 ?
3602 "(No Maps received)" : "");
Mohit Khanna8ee37c62017-08-07 17:15:20 -07003603
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303604 ol_txrx_peer_tx_queue_free(pdev, peer);
3605
Deepak Dhamdhereb0d2dda2017-04-03 01:01:50 -07003606 /* Remove mappings from peer_id to peer object */
3607 ol_txrx_peer_clear_map_peer(pdev, peer);
3608
wadesong9f2b1102017-12-20 22:58:35 +08003609 /* Remove peer pointer from local peer ID map */
3610 ol_txrx_local_peer_id_free(pdev, peer);
3611
Mohit Khannab7bec722017-11-10 11:43:44 -08003612 ol_txrx_peer_free_tids(peer);
3613
3614 ol_txrx_dump_peer_access_list(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003615
Alok Kumarbda73bb2018-05-17 11:50:03 +05303616 qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
Alok Kumar4d87ff22018-06-01 17:15:57 +05303617 if (ol_txrx_is_peer_eligible_for_deletion(peer, pdev)) {
Alok Kumarbda73bb2018-05-17 11:50:03 +05303618 qdf_mem_free(peer);
3619 } else {
3620 /*
3621 * Mark this PEER as a stale peer, to be deleted
3622 * during PEER UNMAP. Remove this peer from
3623 * roam_stale_peer_list during UNMAP.
3624 */
3625 struct ol_txrx_roam_stale_peer_t *roam_stale_peer;
3626
3627 roam_stale_peer = qdf_mem_malloc(
3628 sizeof(struct ol_txrx_roam_stale_peer_t));
3629 if (roam_stale_peer) {
3630 roam_stale_peer->peer = peer;
3631 TAILQ_INSERT_TAIL(&pdev->roam_stale_peer_list,
3632 roam_stale_peer,
3633 next_stale_entry);
3634 } else {
3635 QDF_TRACE(QDF_MODULE_ID_TXRX,
3636 QDF_TRACE_LEVEL_ERROR,
3637 "No memory allocated");
3638 }
3639 }
3640 qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003641 } else {
Jingxiang Ge190679b2018-01-30 08:56:19 +08003642 access_list = qdf_atomic_read(
3643 &peer->access_list[debug_id]);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303644 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Jingxiang Ge3badb982018-01-02 17:39:01 +08003645 if (!ref_silent)
jitiphil8ad8a6f2018-03-01 23:45:05 +05303646 ol_txrx_info_high("[%d][%d]: ref delete peer %pK ref_cnt -> %d",
Jingxiang Ge3badb982018-01-02 17:39:01 +08003647 debug_id,
Jingxiang Ge190679b2018-01-30 08:56:19 +08003648 access_list,
Jingxiang Ge3badb982018-01-02 17:39:01 +08003649 peer, rc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003650 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003651 return rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003652}
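/*
 * Note: every successful ol_txrx_peer_get_ref(peer, debug_id) is expected
 * to be balanced by an ol_txrx_peer_release_ref(peer, debug_id) with the
 * same debug id; the per-id access_list counters are checked above to
 * catch unbalanced callers.
 */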
3653
Dhanashri Atre12a08392016-02-17 13:10:34 -08003654/**
Mohit Khanna0696eef2016-04-14 16:14:08 -07003655 * ol_txrx_clear_peer_internal() - ol internal function to clear peer
3656 * @peer: pointer to ol txrx peer structure
3657 *
3658 * Return: QDF Status
3659 */
3660static QDF_STATUS
3661ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
3662{
3663 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3664 /* Drop pending Rx frames in CDS */
3665 if (sched_ctx)
3666 cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3667
3668 /* Purge the cached rx frame queue */
3669 ol_txrx_flush_rx_frames(peer, 1);
3670
3671 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003672 peer->state = OL_TXRX_PEER_STATE_DISC;
3673 qdf_spin_unlock_bh(&peer->peer_info_lock);
3674
3675 return QDF_STATUS_SUCCESS;
3676}
3677
3678/**
3679 * ol_txrx_clear_peer() - clear peer
3680 * @sta_id: sta id
3681 *
3682 * Return: QDF Status
3683 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003684static QDF_STATUS ol_txrx_clear_peer(struct cdp_pdev *ppdev, uint8_t sta_id)
Mohit Khanna0696eef2016-04-14 16:14:08 -07003685{
3686 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003687 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003688
3689 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303690 ol_txrx_err("%s: Unable to find pdev!",
Mohit Khanna0696eef2016-04-14 16:14:08 -07003691 __func__);
3692 return QDF_STATUS_E_FAILURE;
3693 }
3694
3695 if (sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303696 ol_txrx_err("Invalid sta id %d", sta_id);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003697 return QDF_STATUS_E_INVAL;
3698 }
3699
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003700 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Kabilan Kannanfa163982018-01-30 12:03:41 -08003701
3702 /* Return success if the peer has already been cleared by
3703 * the data path via the peer detach function.
3704 */
Mohit Khanna0696eef2016-04-14 16:14:08 -07003705 if (!peer)
Kabilan Kannanfa163982018-01-30 12:03:41 -08003706 return QDF_STATUS_SUCCESS;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003707
3708 return ol_txrx_clear_peer_internal(peer);
3709
3710}
3711
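/**
 * peer_unmap_timer_work_function() - work invoked when peer unmap times out
 * @param: peer handle whose unmap events were not received
 *
 * Dumps the peer access list and the roam debug table for diagnostics and
 * then triggers recovery, since missing PEER UNMAP events leave the peer
 * object stranded.
 *
 * Return: None
 */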
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003712void peer_unmap_timer_work_function(void *param)
3713{
3714 WMA_LOGE("Enter: %s", __func__);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003715 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08003716 ol_txrx_dump_peer_access_list(param);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003717 wlan_roam_debug_dump_table();
Anurag Chouhan4085ff72017-10-05 18:09:56 +05303718 cds_trigger_recovery(QDF_PEER_UNMAP_TIMEDOUT);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003719}
3720
Mohit Khanna0696eef2016-04-14 16:14:08 -07003721/**
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003722 * peer_unmap_timer_handler() - peer unmap timer function
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003723 * @data: peer object pointer
3724 *
3725 * Return: none
3726 */
3727void peer_unmap_timer_handler(void *data)
3728{
3729 ol_txrx_peer_handle peer = (ol_txrx_peer_handle)data;
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003730 ol_txrx_pdev_handle txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003731
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003732 ol_txrx_err("all unmap events not received for peer %pK, ref_cnt %d",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003733 peer, qdf_atomic_read(&peer->ref_cnt));
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003734 ol_txrx_err("peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003735 peer,
3736 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3737 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3738 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Nachiket Kukadea48fd772017-07-28 18:48:57 +05303739 if (!cds_is_driver_recovering() && !cds_is_fw_down()) {
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003740 qdf_create_work(0, &txrx_pdev->peer_unmap_timer_work,
3741 peer_unmap_timer_work_function,
Naveen Rawat17c42a82018-02-01 19:18:27 -08003742 peer);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003743 qdf_sched_work(0, &txrx_pdev->peer_unmap_timer_work);
Deepak Dhamdhered42ab7c2017-04-13 19:32:16 -07003744 } else {
3745 ol_txrx_err("Recovery is in progress, ignore!");
3746 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003747}
3748
3749
3750/**
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003751 * ol_txrx_peer_detach() - Delete a peer's data object.
3752 * @peer - the object to detach
Naveen Rawatf4ada152017-09-05 14:56:12 -07003753 * @bitmap - bitmap indicating special handling of request.
Dhanashri Atre12a08392016-02-17 13:10:34 -08003754 *
3755 * When the host's control SW disassociates a peer, it calls
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003756 * this function to detach and delete the peer. The reference
Dhanashri Atre12a08392016-02-17 13:10:34 -08003757 * stored in the control peer object to the data peer
3758 * object (set up by a call to ol_peer_store()) is provided.
3759 *
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003760 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08003761 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003762static void ol_txrx_peer_detach(void *ppeer, uint32_t bitmap)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003763{
Leo Chang98726762016-10-28 11:07:18 -07003764 ol_txrx_peer_handle peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003765 struct ol_txrx_vdev_t *vdev = peer->vdev;
3766
3767 /* redirect peer's rx delivery function to point to a discard func */
3768 peer->rx_opt_proc = ol_rx_discard;
3769
3770 peer->valid = 0;
3771
Mohit Khanna0696eef2016-04-14 16:14:08 -07003772 /* flush all rx packets before clearing up the peer local_id */
3773 ol_txrx_clear_peer_internal(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003774
3775 /* debug print to dump rx reorder state */
3776 /* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
3777
Poddar, Siddarth14521792017-03-14 21:19:42 +05303778 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003779 "%s:peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003780 __func__, peer,
3781 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3782 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3783 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003784
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303785 qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003786 if (vdev->last_real_peer == peer)
3787 vdev->last_real_peer = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303788 qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003789 htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
3790
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003791 /*
3792 * set delete_in_progress to identify that wma
 3793	 * is waiting for the unmap message for this peer
3794 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303795 qdf_atomic_set(&peer->delete_in_progress, 1);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003796
Lin Bai973e6922018-01-08 17:59:19 +08003797 if (!(bitmap & (1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER))) {
Naveen Rawatf4ada152017-09-05 14:56:12 -07003798 if (vdev->opmode == wlan_op_mode_sta) {
3799 qdf_mem_copy(&peer->vdev->last_peer_mac_addr,
3800 &peer->mac_addr,
3801 sizeof(union ol_txrx_align_mac_addr_t));
Abhishek Singh217d9782017-04-28 23:49:11 +05303802
Lin Bai973e6922018-01-08 17:59:19 +08003803 /*
3804 * Create a timer to track unmap events when the
3805 * sta peer gets deleted.
3806 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003807 qdf_timer_start(&peer->peer_unmap_timer,
3808 OL_TXRX_PEER_UNMAP_TIMEOUT);
Mohit Khannab7bec722017-11-10 11:43:44 -08003809 ol_txrx_info_high
3810 ("started peer_unmap_timer for peer %pK",
3811 peer);
Naveen Rawatf4ada152017-09-05 14:56:12 -07003812 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003813 }
3814
3815 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003816 * Remove the reference added during peer_attach.
3817 * The peer will still be left allocated until the
3818 * PEER_UNMAP message arrives to remove the other
3819 * reference, added by the PEER_MAP message.
3820 */
Mohit Khannab7bec722017-11-10 11:43:44 -08003821 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003822}
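
/*
 * Usage sketch (illustrative only): callers pass a bitmap of CDP
 * peer-delete flags to request special handling, e.g.
 *
 *	ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
 *	ol_txrx_peer_detach(peer, 1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER);
 *
 * The first form is what ol_txrx_peer_detach_force_delete() below uses;
 * the second suppresses the STA peer_unmap_timer started above.
 */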
3823
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003824/**
3825 * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
Lin Bai973e6922018-01-08 17:59:19 +08003826 * @ppeer - the object to detach
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003827 *
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003828 * Detach a peer and force the peer object to be removed. It is called during
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003829 * a roaming scenario when the firmware has already deleted the peer.
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003830 * Remove it from the peer_id_to_object map. The peer object is actually freed
 3831 * when the last reference is released.
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003832 *
3833 * Return: None
3834 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003835static void ol_txrx_peer_detach_force_delete(void *ppeer)
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003836{
Leo Chang98726762016-10-28 11:07:18 -07003837 ol_txrx_peer_handle peer = ppeer;
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003838 ol_txrx_pdev_handle pdev = peer->vdev->pdev;
3839
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003840 ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d",
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003841 __func__, peer, qdf_atomic_read(&peer->ref_cnt));
3842
3843 /* Clear the peer_id_to_obj map entries */
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003844 ol_txrx_peer_remove_obj_map_entries(pdev, peer);
Lin Bai973e6922018-01-08 17:59:19 +08003845 ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003846}
3847
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003848/**
3849 * ol_txrx_dump_tx_desc() - dump tx desc total and free count
 3850 * @pdev_handle: Pointer to txrx pdev
3851 *
3852 * Return: none
3853 */
3854static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3855{
3856 struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003857 uint32_t total, num_free;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003858
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303859 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3860 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3861 else
3862 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003863
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003864 num_free = ol_tx_get_total_free_desc(pdev);
3865
Kapil Gupta53d9b572017-06-28 17:53:25 +05303866 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303867 "total tx credit %d num_free %d",
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003868 total, num_free);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003869
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003870}
3871
3872/**
3873 * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3874 * @timeout: timeout in ms
3875 *
3876 * Wait for tx queue to be empty, return timeout error if
3877 * queue doesn't empty before timeout occurs.
3878 *
3879 * Return:
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303880 * QDF_STATUS_SUCCESS if the queue empties,
3881 * QDF_STATUS_E_TIMEOUT in case of timeout,
3882 * QDF_STATUS_E_FAULT in case of missing handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003883 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003884static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003885{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003886 struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003887
3888 if (txrx_pdev == NULL) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303889 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003890 "%s: txrx context is null", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303891 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003892 }
3893
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003894 while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
Anurag Chouhan512c7d52016-02-19 15:49:46 +05303895 qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003896 if (timeout <= 0) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303897 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303898 "%s: tx frames are pending", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003899 ol_txrx_dump_tx_desc(txrx_pdev);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303900 return QDF_STATUS_E_TIMEOUT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003901 }
3902 timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3903 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303904 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003905}
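
/*
 * Caller sketch (illustrative): a suspend path typically drains pending
 * tx before letting the bus sleep, e.g.
 *
 *	QDF_STATUS status = ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
 *
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *
 * ol_txrx_bus_suspend() below does exactly this, returning the status
 * straight to its caller.
 */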
3906
3907#ifndef QCA_WIFI_3_0_EMU
Himanshu Agarwal83a87572017-05-25 14:09:50 +05303908#define SUSPEND_DRAIN_WAIT 500
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003909#else
3910#define SUSPEND_DRAIN_WAIT 3000
3911#endif
3912
Yue Ma1e11d792016-02-26 18:58:44 -08003913#ifdef FEATURE_RUNTIME_PM
3914/**
3915 * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
 3916 * @ppdev: TXRX pdev context
3917 *
3918 * TXRX is ready to runtime suspend if there are no pending packets
3919 * in the tx queue.
3920 *
3921 * Return: QDF_STATUS
3922 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003923static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003924{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003925 struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07003926
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003927 if (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev))
Yue Ma1e11d792016-02-26 18:58:44 -08003928 return QDF_STATUS_E_BUSY;
3929 else
3930 return QDF_STATUS_SUCCESS;
3931}
3932
3933/**
3934 * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
 3935 * @ppdev: TXRX pdev context
3936 *
3937 * This is a dummy function for symmetry.
3938 *
3939 * Return: QDF_STATUS_SUCCESS
3940 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003941static QDF_STATUS ol_txrx_runtime_resume(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003942{
3943 return QDF_STATUS_SUCCESS;
3944}
3945#endif
3946
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003947/**
3948 * ol_txrx_bus_suspend() - bus suspend
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003949 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003950 *
3951 * Ensure that ol_txrx is ready for bus suspend
3952 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303953 * Return: QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003954 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003955static QDF_STATUS ol_txrx_bus_suspend(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003956{
3957 return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3958}
3959
3960/**
3961 * ol_txrx_bus_resume() - bus resume
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003962 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003963 *
 3964 * Dummy function for symmetry
3965 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303966 * Return: QDF_STATUS_SUCCESS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003967 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003968static QDF_STATUS ol_txrx_bus_resume(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003969{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303970 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003971}
3972
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003973/**
3974 * ol_txrx_get_tx_pending - Get the number of pending transmit
3975 * frames that are awaiting completion.
3976 *
3977 * @pdev - the data physical device object
3978 * Mainly used in clean up path to make sure all buffers have been freed
3979 *
3980 * Return: count of pending frames
3981 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003982int ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003983{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003984 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003985 uint32_t total;
3986
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303987 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3988 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3989 else
3990 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003991
Nirav Shah55b45a02016-01-21 10:00:16 +05303992 return total - ol_tx_get_total_free_desc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003993}
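
/*
 * Worked example (illustrative): with a global descriptor pool of 1024
 * entries and 1000 descriptors currently free, ol_txrx_get_tx_pending()
 * reports 1024 - 1000 = 24 frames still awaiting completion. The drain
 * loop in ol_txrx_wait_for_pending_tx() polls until this reaches zero.
 */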
3994
3995void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
3996{
3997 ol_tx_desc_list tx_descs;
Yun Parkeaea8632017-04-09 09:53:45 -07003998 /*
3999 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05304000 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Yun Parkeaea8632017-04-09 09:53:45 -07004001	 * which is the same as the normal data send completion path
4002 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004003 htt_tx_pending_discard(pdev_handle->htt_pdev);
4004
4005 TAILQ_INIT(&tx_descs);
4006 ol_tx_queue_discard(pdev_handle, true, &tx_descs);
4007 /* Discard Frames in Discard List */
4008 ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
4009
4010 ol_tx_discard_target_frms(pdev_handle);
4011}
4012
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004013static inline
4014uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
4015{
4016 return (uint64_t) ((size_t) req);
4017}
4018
4019static inline
4020struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
4021{
4022 return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
4023}
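
/*
 * Round-trip sketch (illustrative): the firmware stats cookie is simply
 * the request pointer carried through HTT as a 64-bit value, so
 *
 *	uint64_t cookie = ol_txrx_stats_ptr_to_u64(req);
 *	struct ol_txrx_stats_req_internal *same = ol_txrx_u64_to_stats_ptr(cookie);
 *
 * gives same == req. Because no lookup table is involved,
 * ol_txrx_fw_stats_handler() below validates the pointer against
 * pdev->req_list before dereferencing it.
 */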
4024
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004025#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004026void
4027ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
4028 uint8_t cfg_stats_type, uint32_t cfg_val)
4029{
4030 uint64_t dummy_cookie = 0;
Yun Parkeaea8632017-04-09 09:53:45 -07004031
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004032 htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
4033 0 /* reset mask */,
4034 cfg_stats_type, cfg_val, dummy_cookie);
4035}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004036#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004037
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004038static A_STATUS
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004039ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
Dhanashri Atre52f71332016-08-22 12:12:36 -07004040 bool per_vdev, bool response_expected)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004041{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004042 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004043 struct ol_txrx_pdev_t *pdev = vdev->pdev;
4044 uint64_t cookie;
4045 struct ol_txrx_stats_req_internal *non_volatile_req;
4046
4047 if (!pdev ||
4048 req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
4049 req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
4050 return A_ERROR;
4051 }
4052
4053 /*
4054 * Allocate a non-transient stats request object.
4055 * (The one provided as an argument is likely allocated on the stack.)
4056 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304057 non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004058 if (!non_volatile_req)
4059 return A_NO_MEMORY;
4060
4061 /* copy the caller's specifications */
4062 non_volatile_req->base = *req;
4063 non_volatile_req->serviced = 0;
4064 non_volatile_req->offset = 0;
4065
4066 /* use the non-volatile request object's address as the cookie */
4067 cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
4068
tfyu9fcabd72017-09-26 17:46:48 +08004069 if (response_expected) {
4070 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4071 TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
4072 pdev->req_list_depth++;
4073 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4074 }
4075
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004076 if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
4077 req->stats_type_upload_mask,
4078 req->stats_type_reset_mask,
4079 HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
4080 cookie)) {
tfyu9fcabd72017-09-26 17:46:48 +08004081 if (response_expected) {
4082 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4083 TAILQ_REMOVE(&pdev->req_list, non_volatile_req, req_list_elem);
4084 pdev->req_list_depth--;
4085 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4086 }
4087
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304088 qdf_mem_free(non_volatile_req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004089 return A_ERROR;
4090 }
4091
Nirav Shahd2310422016-01-21 18:58:06 +05304092 if (response_expected == false)
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304093 qdf_mem_free(non_volatile_req);
Nirav Shahd2310422016-01-21 18:58:06 +05304094
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004095 return A_OK;
4096}
Dhanashri Atre12a08392016-02-17 13:10:34 -08004097
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004098void
4099ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
4100 uint64_t cookie, uint8_t *stats_info_list)
4101{
4102 enum htt_dbg_stats_type type;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07004103 enum htt_cmn_dbg_stats_type cmn_type = HTT_DBG_CMN_NUM_STATS_INVALID;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004104 enum htt_dbg_stats_status status;
4105 int length;
4106 uint8_t *stats_data;
tfyu9fcabd72017-09-26 17:46:48 +08004107 struct ol_txrx_stats_req_internal *req, *tmp;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004108 int more = 0;
tfyu9fcabd72017-09-26 17:46:48 +08004109 int found = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004110
4111 req = ol_txrx_u64_to_stats_ptr(cookie);
4112
tfyu9fcabd72017-09-26 17:46:48 +08004113 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4114 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4115 if (req == tmp) {
4116 found = 1;
4117 break;
4118 }
4119 }
4120 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4121
4122 if (!found) {
4123 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05304124 "req(%pK) from firmware can't be found in the list\n", req);
tfyu9fcabd72017-09-26 17:46:48 +08004125 return;
4126 }
4127
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004128 do {
4129 htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
4130 &length, &stats_data);
4131 if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
4132 break;
4133 if (status == HTT_DBG_STATS_STATUS_PRESENT ||
4134 status == HTT_DBG_STATS_STATUS_PARTIAL) {
4135 uint8_t *buf;
4136 int bytes = 0;
4137
4138 if (status == HTT_DBG_STATS_STATUS_PARTIAL)
4139 more = 1;
4140 if (req->base.print.verbose || req->base.print.concise)
4141 /* provide the header along with the data */
4142 htt_t2h_stats_print(stats_info_list,
4143 req->base.print.concise);
4144
4145 switch (type) {
4146 case HTT_DBG_STATS_WAL_PDEV_TXRX:
4147 bytes = sizeof(struct wlan_dbg_stats);
4148 if (req->base.copy.buf) {
4149 int lmt;
4150
4151 lmt = sizeof(struct wlan_dbg_stats);
4152 if (req->base.copy.byte_limit < lmt)
4153 lmt = req->base.copy.byte_limit;
4154 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304155 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004156 }
4157 break;
4158 case HTT_DBG_STATS_RX_REORDER:
4159 bytes = sizeof(struct rx_reorder_stats);
4160 if (req->base.copy.buf) {
4161 int lmt;
4162
4163 lmt = sizeof(struct rx_reorder_stats);
4164 if (req->base.copy.byte_limit < lmt)
4165 lmt = req->base.copy.byte_limit;
4166 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304167 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004168 }
4169 break;
4170 case HTT_DBG_STATS_RX_RATE_INFO:
4171 bytes = sizeof(wlan_dbg_rx_rate_info_t);
4172 if (req->base.copy.buf) {
4173 int lmt;
4174
4175 lmt = sizeof(wlan_dbg_rx_rate_info_t);
4176 if (req->base.copy.byte_limit < lmt)
4177 lmt = req->base.copy.byte_limit;
4178 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304179 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004180 }
4181 break;
4182
4183 case HTT_DBG_STATS_TX_RATE_INFO:
4184 bytes = sizeof(wlan_dbg_tx_rate_info_t);
4185 if (req->base.copy.buf) {
4186 int lmt;
4187
4188 lmt = sizeof(wlan_dbg_tx_rate_info_t);
4189 if (req->base.copy.byte_limit < lmt)
4190 lmt = req->base.copy.byte_limit;
4191 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304192 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004193 }
4194 break;
4195
4196 case HTT_DBG_STATS_TX_PPDU_LOG:
4197 bytes = 0;
4198 /* TO DO: specify how many bytes are present */
4199 /* TO DO: add copying to the requestor's buf */
4200
4201 case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07004202 bytes = sizeof(struct
4203 rx_remote_buffer_mgmt_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004204 if (req->base.copy.buf) {
4205 int limit;
4206
Yun Parkeaea8632017-04-09 09:53:45 -07004207 limit = sizeof(struct
4208 rx_remote_buffer_mgmt_stats);
4209 if (req->base.copy.byte_limit < limit)
4210 limit = req->base.copy.
4211 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004212 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304213 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004214 }
4215 break;
4216
4217 case HTT_DBG_STATS_TXBF_INFO:
4218 bytes = sizeof(struct wlan_dbg_txbf_data_stats);
4219 if (req->base.copy.buf) {
4220 int limit;
4221
Yun Parkeaea8632017-04-09 09:53:45 -07004222 limit = sizeof(struct
4223 wlan_dbg_txbf_data_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004224 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004225 limit = req->base.copy.
4226 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004227 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304228 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004229 }
4230 break;
4231
4232 case HTT_DBG_STATS_SND_INFO:
4233 bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
4234 if (req->base.copy.buf) {
4235 int limit;
4236
Yun Parkeaea8632017-04-09 09:53:45 -07004237 limit = sizeof(struct
4238 wlan_dbg_txbf_snd_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004239 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004240 limit = req->base.copy.
4241 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004242 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304243 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004244 }
4245 break;
4246
4247 case HTT_DBG_STATS_TX_SELFGEN_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07004248 bytes = sizeof(struct
4249 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004250 if (req->base.copy.buf) {
4251 int limit;
4252
Yun Parkeaea8632017-04-09 09:53:45 -07004253 limit = sizeof(struct
4254 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004255 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004256 limit = req->base.copy.
4257 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004258 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304259 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004260 }
4261 break;
4262
4263 case HTT_DBG_STATS_ERROR_INFO:
4264 bytes =
4265 sizeof(struct wlan_dbg_wifi2_error_stats);
4266 if (req->base.copy.buf) {
4267 int limit;
4268
Yun Parkeaea8632017-04-09 09:53:45 -07004269 limit = sizeof(struct
4270 wlan_dbg_wifi2_error_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004271 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004272 limit = req->base.copy.
4273 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004274 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304275 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004276 }
4277 break;
4278
4279 case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
4280 bytes =
4281 sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
4282 if (req->base.copy.buf) {
4283 int limit;
4284
4285 limit = sizeof(struct
4286 rx_txbf_musu_ndpa_pkts_stats);
4287 if (req->base.copy.byte_limit < limit)
4288 limit =
4289 req->base.copy.byte_limit;
4290 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304291 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004292 }
4293 break;
4294
4295 default:
4296 break;
4297 }
Yun Parkeaea8632017-04-09 09:53:45 -07004298 buf = req->base.copy.buf ?
4299 req->base.copy.buf : stats_data;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07004300
4301 /* Not implemented for MCL */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004302 if (req->base.callback.fp)
4303 req->base.callback.fp(req->base.callback.ctxt,
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07004304 cmn_type, buf, bytes);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004305 }
4306 stats_info_list += length;
4307 } while (1);
4308
4309 if (!more) {
tfyu9fcabd72017-09-26 17:46:48 +08004310 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4311 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4312 if (req == tmp) {
4313 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
4314 pdev->req_list_depth--;
4315 qdf_mem_free(req);
4316 break;
4317 }
4318 }
4319 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004320 }
4321}
4322
4323#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
4324int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
4325{
4326 if (debug_specs & TXRX_DBG_MASK_OBJS) {
4327#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4328 ol_txrx_pdev_display(vdev->pdev, 0);
4329#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304330 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304331 "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004332#endif
4333 }
Yun Parkeaea8632017-04-09 09:53:45 -07004334 if (debug_specs & TXRX_DBG_MASK_STATS)
Mohit Khannaca4173b2017-09-12 21:52:19 -07004335 ol_txrx_stats_display(vdev->pdev,
4336 QDF_STATS_VERBOSITY_LEVEL_HIGH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004337 if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
4338#if defined(ENABLE_TXRX_PROT_ANALYZE)
4339 ol_txrx_prot_ans_display(vdev->pdev);
4340#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304341 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304342 "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004343#endif
4344 }
4345 if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
4346#if defined(ENABLE_RX_REORDER_TRACE)
4347 ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
4348#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304349 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304350 "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004351#endif
4352
4353 }
4354 return 0;
4355}
4356#endif
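
/*
 * Mask sketch (illustrative): debug_specs is a bitwise OR of the
 * TXRX_DBG_MASK_* bits handled above, so a caller wanting both object and
 * stats dumps would pass
 *
 *	ol_txrx_debug(vdev, TXRX_DBG_MASK_OBJS | TXRX_DBG_MASK_STATS);
 *
 * Note that this helper is only built when ATH_PERF_PWR_OFFLOAD is not
 * defined; masks whose support is compiled out just print the build flag
 * needed to enable them.
 */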
4357
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004358#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004359int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
4360 int max_subfrms_ampdu, int max_subfrms_amsdu)
4361{
4362 return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
4363 max_subfrms_ampdu, max_subfrms_amsdu);
4364}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004365#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004366
4367#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4368void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
4369{
4370 struct ol_txrx_vdev_t *vdev;
4371
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304372 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004373 "%*s%s:\n", indent, " ", "txrx pdev");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304374 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004375 "%*spdev object: %pK", indent + 4, " ", pdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304376 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004377 "%*svdev list:", indent + 4, " ");
4378 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304379 ol_txrx_vdev_display(vdev, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004380 }
4381 ol_txrx_peer_find_display(pdev, indent + 4);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304382 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004383 "%*stx desc pool: %d elems @ %pK", indent + 4, " ",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004384 pdev->tx_desc.pool_size, pdev->tx_desc.array);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304385 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004386 htt_display(pdev->htt_pdev, indent);
4387}
4388
4389void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
4390{
4391 struct ol_txrx_peer_t *peer;
4392
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304393 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004394 "%*stxrx vdev: %pK\n", indent, " ", vdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304395 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004396 "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304397 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004398 "%*sMAC addr: %d:%d:%d:%d:%d:%d",
4399 indent + 4, " ",
4400 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
4401 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
4402 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304403 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004404 "%*speer list:", indent + 4, " ");
4405 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304406 ol_txrx_peer_display(peer, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004407 }
4408}
4409
4410void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
4411{
4412 int i;
4413
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304414 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004415 "%*stxrx peer: %pK", indent, " ", peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004416 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
4417 if (peer->peer_ids[i] != HTT_INVALID_PEER) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304418 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004419 "%*sID: %d", indent + 4, " ",
4420 peer->peer_ids[i]);
4421 }
4422 }
4423}
4424#endif /* TXRX_DEBUG_LEVEL */
4425
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004426/**
 4427 * ol_txrx_stats() - write ol layer queue stats into a caller buffer
4428 * @vdev_id: vdev_id
4429 * @buffer: pointer to buffer
4430 * @buf_len: length of the buffer
4431 *
4432 * Return: length of string
4433 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004434static int
Yun Parkeaea8632017-04-09 09:53:45 -07004435ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004436{
4437 uint32_t len = 0;
4438
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004439 struct ol_txrx_vdev_t *vdev =
4440 (struct ol_txrx_vdev_t *)
4441 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004442
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004443 if (!vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304444 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304445 "%s: vdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004446 snprintf(buffer, buf_len, "vdev not found");
4447 return len;
4448 }
4449
4450 len = scnprintf(buffer, buf_len,
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004451 "\n\nTXRX stats:\nllQueue State : %s\npause %u unpause %u\noverflow %u\nllQueue timer state : %s",
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304452 ((vdev->ll_pause.is_q_paused == false) ?
4453 "UNPAUSED" : "PAUSED"),
4454 vdev->ll_pause.q_pause_cnt,
4455 vdev->ll_pause.q_unpause_cnt,
4456 vdev->ll_pause.q_overflow_cnt,
4457 ((vdev->ll_pause.is_q_timer_on == false)
4458 ? "NOT-RUNNING" : "RUNNING"));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004459 return len;
4460}
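
/*
 * Output sketch (illustrative): for an unpaused low-latency tx queue the
 * buffer filled above looks roughly like
 *
 *	TXRX stats:
 *	llQueue State : UNPAUSED
 *	pause 3 unpause 3
 *	overflow 0
 *	llQueue timer state : NOT-RUNNING
 *
 * The return value is the number of characters written; an unresolvable
 * vdev_id yields the "vdev not found" string and a length of 0.
 */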
4461
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004462#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
4463/**
4464 * ol_txrx_disp_peer_cached_bufq_stats() - display peer cached_bufq stats
4465 * @peer: peer pointer
4466 *
4467 * Return: None
4468 */
4469static void ol_txrx_disp_peer_cached_bufq_stats(struct ol_txrx_peer_t *peer)
4470{
Mohit Khannaca4173b2017-09-12 21:52:19 -07004471 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4472 "cached_bufq: curr %d drops %d hwm %d whatifs %d thresh %d",
4473 peer->bufq_info.curr,
4474 peer->bufq_info.dropped,
4475 peer->bufq_info.high_water_mark,
4476 peer->bufq_info.qdepth_no_thresh,
4477 peer->bufq_info.thresh);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004478}
4479
4480/**
4481 * ol_txrx_disp_peer_stats() - display peer stats
4482 * @pdev: pdev pointer
4483 *
4484 * Return: None
4485 */
4486static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
 4487{
	int i;
4488 struct ol_txrx_peer_t *peer;
4489 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
4490
4491 if (osc && hif_is_load_or_unload_in_progress(HIF_GET_SOFTC(osc)))
4492 return;
4493
4494 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
4495 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4496 peer = pdev->local_peer_ids.map[i];
Frank Liu4362e462018-01-16 11:51:55 +08004497 if (peer) {
4498 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khannab7bec722017-11-10 11:43:44 -08004499 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Frank Liu4362e462018-01-16 11:51:55 +08004500 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
4501 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004502 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4503
4504 if (peer) {
4505 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004506 "stats: peer 0x%pK local peer id %d", peer, i);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004507 ol_txrx_disp_peer_cached_bufq_stats(peer);
Mohit Khannab7bec722017-11-10 11:43:44 -08004508 ol_txrx_peer_release_ref(peer,
4509 PEER_DEBUG_ID_OL_INTERNAL);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004510 }
4511 }
4512}
4513#else
4514static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4515{
4516 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Mohit Khannaca4173b2017-09-12 21:52:19 -07004517 "peer stats not supported w/o QCA_SUPPORT_TXRX_LOCAL_PEER_ID");
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004518}
4519#endif
4520
Mohit Khannaca4173b2017-09-12 21:52:19 -07004521void ol_txrx_stats_display(ol_txrx_pdev_handle pdev,
4522 enum qdf_stats_verbosity_level level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004523{
Mohit Khannaca4173b2017-09-12 21:52:19 -07004524 u64 tx_dropped =
4525 pdev->stats.pub.tx.dropped.download_fail.pkts
4526 + pdev->stats.pub.tx.dropped.target_discard.pkts
4527 + pdev->stats.pub.tx.dropped.no_ack.pkts
4528 + pdev->stats.pub.tx.dropped.others.pkts;
4529
4530 if (level == QDF_STATS_VERBOSITY_LEVEL_LOW) {
4531 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4532 "STATS |%u %u|TX: %lld tso %lld ok %lld drops(%u-%lld %u-%lld %u-%lld ?-%lld hR-%lld)|RX: %lld drops(E %lld PI %lld ME %lld) fwd(S %d F %d SF %d)|",
4533 pdev->tx_desc.num_free,
4534 pdev->tx_desc.pool_size,
4535 pdev->stats.pub.tx.from_stack.pkts,
4536 pdev->stats.pub.tx.tso.tso_pkts.pkts,
4537 pdev->stats.pub.tx.delivered.pkts,
4538 htt_tx_status_download_fail,
4539 pdev->stats.pub.tx.dropped.download_fail.pkts,
4540 htt_tx_status_discard,
4541 pdev->stats.pub.tx.dropped.target_discard.pkts,
4542 htt_tx_status_no_ack,
4543 pdev->stats.pub.tx.dropped.no_ack.pkts,
4544 pdev->stats.pub.tx.dropped.others.pkts,
4545 pdev->stats.pub.tx.dropped.host_reject.pkts,
4546 pdev->stats.pub.rx.delivered.pkts,
4547 pdev->stats.pub.rx.dropped_err.pkts,
4548 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4549 pdev->stats.pub.rx.dropped_mic_err.pkts,
4550 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4551 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4552 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
4553 return;
4554 }
4555
4556 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304557 "TX PATH Statistics:");
Mohit Khannaca4173b2017-09-12 21:52:19 -07004558 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shahda008342016-05-17 18:50:40 +05304559 "sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
4560 pdev->stats.pub.tx.from_stack.pkts,
4561 pdev->stats.pub.tx.from_stack.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004562 pdev->stats.pub.tx.dropped.host_reject.pkts,
4563 pdev->stats.pub.tx.dropped.host_reject.bytes,
Mohit Khannaca4173b2017-09-12 21:52:19 -07004564 tx_dropped,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004565 pdev->stats.pub.tx.dropped.download_fail.bytes
4566 + pdev->stats.pub.tx.dropped.target_discard.bytes
4567 + pdev->stats.pub.tx.dropped.no_ack.bytes);
Mohit Khannaca4173b2017-09-12 21:52:19 -07004568 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4569 "successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B) others: %lld (%lld B)",
Nirav Shahda008342016-05-17 18:50:40 +05304570 pdev->stats.pub.tx.delivered.pkts,
4571 pdev->stats.pub.tx.delivered.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004572 pdev->stats.pub.tx.dropped.download_fail.pkts,
4573 pdev->stats.pub.tx.dropped.download_fail.bytes,
4574 pdev->stats.pub.tx.dropped.target_discard.pkts,
4575 pdev->stats.pub.tx.dropped.target_discard.bytes,
4576 pdev->stats.pub.tx.dropped.no_ack.pkts,
Mohit Khannaca4173b2017-09-12 21:52:19 -07004577 pdev->stats.pub.tx.dropped.no_ack.bytes,
4578 pdev->stats.pub.tx.dropped.others.pkts,
4579 pdev->stats.pub.tx.dropped.others.bytes);
4580 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shahda008342016-05-17 18:50:40 +05304581 "Tx completions per HTT message:\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004582 "Single Packet %d\n"
4583 " 2-10 Packets %d\n"
4584 "11-20 Packets %d\n"
4585 "21-30 Packets %d\n"
4586 "31-40 Packets %d\n"
4587 "41-50 Packets %d\n"
4588 "51-60 Packets %d\n"
4589 " 60+ Packets %d\n",
4590 pdev->stats.pub.tx.comp_histogram.pkts_1,
4591 pdev->stats.pub.tx.comp_histogram.pkts_2_10,
4592 pdev->stats.pub.tx.comp_histogram.pkts_11_20,
4593 pdev->stats.pub.tx.comp_histogram.pkts_21_30,
4594 pdev->stats.pub.tx.comp_histogram.pkts_31_40,
4595 pdev->stats.pub.tx.comp_histogram.pkts_41_50,
4596 pdev->stats.pub.tx.comp_histogram.pkts_51_60,
4597 pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
Nirav Shahda008342016-05-17 18:50:40 +05304598
Mohit Khannaca4173b2017-09-12 21:52:19 -07004599 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304600 "RX PATH Statistics:");
Mohit Khannaca4173b2017-09-12 21:52:19 -07004601 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304602 "%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
Nirav Shahda008342016-05-17 18:50:40 +05304603 "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
4604 "msdus with frag_ind: %d msdus with offload_ind: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004605 pdev->stats.priv.rx.normal.ppdus,
4606 pdev->stats.priv.rx.normal.mpdus,
4607 pdev->stats.pub.rx.delivered.pkts,
4608 pdev->stats.pub.rx.delivered.bytes,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304609 pdev->stats.pub.rx.dropped_err.pkts,
4610 pdev->stats.pub.rx.dropped_err.bytes,
4611 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4612 pdev->stats.pub.rx.dropped_peer_invalid.bytes,
4613 pdev->stats.pub.rx.dropped_mic_err.pkts,
Nirav Shahda008342016-05-17 18:50:40 +05304614 pdev->stats.pub.rx.dropped_mic_err.bytes,
4615 pdev->stats.pub.rx.msdus_with_frag_ind,
4616 pdev->stats.pub.rx.msdus_with_offload_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004617
Mohit Khannaca4173b2017-09-12 21:52:19 -07004618 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004619 " fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
4620 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4621 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4622 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304623
Mohit Khannaca4173b2017-09-12 21:52:19 -07004624 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shahda008342016-05-17 18:50:40 +05304625 "Rx packets per HTT message:\n"
Nirav Shah6a4eee62016-04-25 10:15:04 +05304626 "Single Packet %d\n"
4627 " 2-10 Packets %d\n"
4628 "11-20 Packets %d\n"
4629 "21-30 Packets %d\n"
4630 "31-40 Packets %d\n"
4631 "41-50 Packets %d\n"
4632 "51-60 Packets %d\n"
4633 " 60+ Packets %d\n",
4634 pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4635 pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4636 pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4637 pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4638 pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4639 pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4640 pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4641 pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004642
4643 ol_txrx_disp_peer_stats(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004644}
4645
4646void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4647{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304648 qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004649}
4650
4651#if defined(ENABLE_TXRX_PROT_ANALYZE)
4652
4653void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4654{
4655 ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4656 ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4657}
4658
4659#endif /* ENABLE_TXRX_PROT_ANALYZE */
4660
4661#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
4662int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4663{
4664 return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4665 OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4666}
4667#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4668
4669#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
4670A_STATUS
4671ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4672 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4673{
Anurag Chouhanc5548422016-02-24 18:33:27 +05304674 qdf_assert(pdev && peer && stats);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304675 qdf_spin_lock_bh(&pdev->peer_stat_mutex);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304676 qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304677 qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004678 return A_OK;
4679}
4680#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4681
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004682static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *pvdev, bool val)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004683{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004684 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004685
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004686 if (NULL == vdev)
4687 return;
4688
4689 vdev->disable_intrabss_fwd = val;
4690}
4691
Nirav Shahc657ef52016-07-26 14:22:38 +05304692/**
4693 * ol_txrx_update_mac_id() - update mac_id for vdev
4694 * @vdev_id: vdev id
4695 * @mac_id: mac id
4696 *
4697 * Return: none
4698 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004699static void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id)
Nirav Shahc657ef52016-07-26 14:22:38 +05304700{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004701 struct ol_txrx_vdev_t *vdev =
4702 (struct ol_txrx_vdev_t *)
4703 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Nirav Shahc657ef52016-07-26 14:22:38 +05304704
4705 if (NULL == vdev) {
4706 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4707 "%s: Invalid vdev_id %d", __func__, vdev_id);
4708 return;
4709 }
4710 vdev->mac_id = mac_id;
4711}
4712
Alok Kumar75355aa2018-03-19 17:32:58 +05304713/**
 4714 * ol_txrx_get_tx_ack_stats() - get tx ack count
4715 * @vdev_id: vdev_id
4716 *
4717 * Return: tx ack count
4718 */
4719static uint32_t ol_txrx_get_tx_ack_stats(uint8_t vdev_id)
4720{
4721 struct ol_txrx_vdev_t *vdev =
4722 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
4723 if (!vdev) {
4724 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4725 "%s: Invalid vdev_id %d", __func__, vdev_id);
4726 return 0;
4727 }
4728 return vdev->txrx_stats.txack_success;
4729}
4730
Leo Chang8e073612015-11-13 10:55:34 -08004731/**
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004732 * ol_txrx_display_stats() - Display OL TXRX stats
 4733 * @value: Module id for which stats need to be displayed
Nirav Shahda008342016-05-17 18:50:40 +05304734 *
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004735 * Return: status
Nirav Shahda008342016-05-17 18:50:40 +05304736 */
Mohit Khannaca4173b2017-09-12 21:52:19 -07004737static QDF_STATUS
4738ol_txrx_display_stats(void *soc, uint16_t value,
4739 enum qdf_stats_verbosity_level verb_level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004740{
4741 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004742 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004743
Anurag Chouhan6d760662016-02-20 16:05:43 +05304744 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004745 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304746 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304747 "%s: pdev is NULL", __func__);
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004748 return QDF_STATUS_E_NULL_VALUE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004749 }
4750
4751 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004752 case CDP_TXRX_PATH_STATS:
Mohit Khannaca4173b2017-09-12 21:52:19 -07004753 ol_txrx_stats_display(pdev, verb_level);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004754 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004755 case CDP_TXRX_TSO_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004756 ol_txrx_stats_display_tso(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004757 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004758 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07004759 ol_tx_dump_flow_pool_info((void *)pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004760 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004761 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304762 qdf_nbuf_tx_desc_count_display();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004763 break;
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -08004764 case CDP_WLAN_RX_BUF_DEBUG_STATS:
4765 htt_display_rx_buf_debug(pdev->htt_pdev);
4766 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304767#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004768 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304769 ol_tx_sched_cur_state_display(pdev);
4770 ol_tx_sched_stats_display(pdev);
4771 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004772 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304773 ol_tx_queue_log_display(pdev);
4774 break;
4775#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004776 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304777 ol_tx_dump_group_credit_stats(pdev);
4778 break;
4779#endif
4780
4781#ifdef DEBUG_HL_LOGGING
Nirav Shaheb017be2018-02-15 11:20:58 +05304782 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304783 htt_dump_bundle_stats(pdev->htt_pdev);
4784 break;
4785#endif
4786#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004787 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004788 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004789 break;
4790 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004791 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004792}
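
/*
 * Caller sketch (illustrative): the stats/debugfs layer selects a module
 * by CDP id and verbosity, e.g.
 *
 *	ol_txrx_display_stats(soc, CDP_TXRX_PATH_STATS,
 *			      QDF_STATS_VERBOSITY_LEVEL_HIGH);
 *
 * CDP_TXRX_PATH_STATS maps to ol_txrx_stats_display() above; unknown ids
 * return QDF_STATUS_E_INVAL.
 */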
4793
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004794/**
4795 * ol_txrx_clear_stats() - Clear OL TXRX stats
 4796 * @value: Module id for which stats need to be cleared
4797 *
4798 * Return: None
4799 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004800static void ol_txrx_clear_stats(uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004801{
4802 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004803 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004804
Anurag Chouhan6d760662016-02-20 16:05:43 +05304805 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004806 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304807 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304808 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004809 return;
4810 }
4811
4812 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004813 case CDP_TXRX_PATH_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004814 ol_txrx_stats_clear(pdev);
4815 break;
Yun Park1027e8c2017-10-13 15:17:37 -07004816 case CDP_TXRX_TSO_STATS:
4817 ol_txrx_tso_stats_clear(pdev);
4818 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004819 case CDP_DUMP_TX_FLOW_POOL_INFO:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004820 ol_tx_clear_flow_pool_stats();
4821 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004822 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304823 qdf_nbuf_tx_desc_count_clear();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004824 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304825#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004826 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304827 ol_tx_sched_stats_clear(pdev);
4828 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004829 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304830 ol_tx_queue_log_clear(pdev);
4831 break;
4832#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004833 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304834 ol_tx_clear_group_credit_stats(pdev);
4835 break;
4836#endif
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004837 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304838 htt_clear_bundle_stats(pdev->htt_pdev);
4839 break;
4840#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004841 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004842 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004843 break;
4844 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004845
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004846}
4847
4848/**
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004849 * ol_txrx_drop_nbuf_list() - drop an nbuf list
 4850 * @buf_list: buffer list to be dropped
4851 *
4852 * Return: int (number of bufs dropped)
4853 */
4854static inline int ol_txrx_drop_nbuf_list(qdf_nbuf_t buf_list)
4855{
4856 int num_dropped = 0;
4857 qdf_nbuf_t buf, next_buf;
4858 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
4859
4860 buf = buf_list;
4861 while (buf) {
Himanshu Agarwaldd2196a2017-07-31 11:38:14 +05304862 QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004863 next_buf = qdf_nbuf_queue_next(buf);
4864 if (pdev)
4865 TXRX_STATS_MSDU_INCR(pdev,
4866 rx.dropped_peer_invalid, buf);
4867 qdf_nbuf_free(buf);
4868 buf = next_buf;
4869 num_dropped++;
4870 }
4871 return num_dropped;
4872}
4873
4874/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004875 * ol_rx_data_cb() - data rx callback
 4876 * @pdev: txrx pdev handle
4877 * @buf_list: buffer list
Nirav Shah36a87bf2016-02-22 12:38:46 +05304878 * @staid: Station id
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004879 *
4880 * Return: None
4881 */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304882static void ol_rx_data_cb(struct ol_txrx_pdev_t *pdev,
4883 qdf_nbuf_t buf_list, uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004884{
Mohit Khanna0696eef2016-04-14 16:14:08 -07004885 void *osif_dev;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004886 uint8_t drop_count = 0;
Nirav Shahcbc6d722016-03-01 16:24:53 +05304887 qdf_nbuf_t buf, next_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304888 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004889 ol_txrx_rx_fp data_rx = NULL;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304890 struct ol_txrx_peer_t *peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004891
Jeff Johnsondac9e382017-09-24 10:36:08 -07004892 if (qdf_unlikely(!pdev))
Nirav Shah36a87bf2016-02-22 12:38:46 +05304893 goto free_buf;
4894
4895 /* Do not use peer directly. Derive peer from staid to
4896 * make sure that peer is valid.
4897 */
Jingxiang Ge3badb982018-01-02 17:39:01 +08004898 peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev,
4899 staid, PEER_DEBUG_ID_OL_RX_THREAD);
Nirav Shah36a87bf2016-02-22 12:38:46 +05304900 if (!peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004901 goto free_buf;
4902
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304903 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre50141c52016-04-07 13:15:29 -07004904 if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
4905 !peer->vdev->rx)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304906 qdf_spin_unlock_bh(&peer->peer_info_lock);
Jingxiang Ge9f297062018-01-24 13:31:31 +08004907 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004908 goto free_buf;
4909 }
Dhanashri Atre182b0272016-02-17 15:35:07 -08004910
4911 data_rx = peer->vdev->rx;
Mohit Khanna0696eef2016-04-14 16:14:08 -07004912 osif_dev = peer->vdev->osif_dev;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304913 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004914
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004915 qdf_spin_lock_bh(&peer->bufq_info.bufq_lock);
4916 if (!list_empty(&peer->bufq_info.cached_bufq)) {
4917 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004918 /* Flush the cached frames to HDD before passing new rx frame */
4919 ol_txrx_flush_rx_frames(peer, 0);
4920 } else
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004921 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004922
Jingxiang Ge3badb982018-01-02 17:39:01 +08004923 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
4924
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004925 buf = buf_list;
4926 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304927 next_buf = qdf_nbuf_queue_next(buf);
4928 qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
Mohit Khanna0696eef2016-04-14 16:14:08 -07004929 ret = data_rx(osif_dev, buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304930 if (ret != QDF_STATUS_SUCCESS) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304931 ol_txrx_err("Frame Rx to HDD failed");
Nirav Shah6a4eee62016-04-25 10:15:04 +05304932 if (pdev)
4933 TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304934 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004935 }
4936 buf = next_buf;
4937 }
4938 return;
4939
4940free_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004941 drop_count = ol_txrx_drop_nbuf_list(buf_list);
4942 ol_txrx_warn("%s:Dropped frames %u", __func__, drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004943}
4944
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004945/* print for every 16th packet */
4946#define OL_TXRX_PRINT_RATE_LIMIT_THRESH 0x0f
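/*
 * Worked example (illustrative note, not driver logic): with the mask
 * above, a check of the form
 * "(count++ & OL_TXRX_PRINT_RATE_LIMIT_THRESH) == 0" is true only when
 * the low four bits of count are zero, i.e. for count = 0, 16, 32, ...,
 * so the associated log line is emitted once per 16 invocations,
 * starting with the very first one.
 */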
4947struct ol_rx_cached_buf *cache_buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304948
4949/** helper function to drop packets
4950 * Note: the caller must hold the cached bufq lock before invoking
4951 * this function. It also assumes that the pointers passed in
4952 * are valid (non-NULL).
4953 */
4954static inline void ol_txrx_drop_frames(
4955 struct ol_txrx_cached_bufq_t *bufqi,
4956 qdf_nbuf_t rx_buf_list)
4957{
4958 uint32_t dropped = ol_txrx_drop_nbuf_list(rx_buf_list);
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004959
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304960 bufqi->dropped += dropped;
4961 bufqi->qdepth_no_thresh += dropped;
4962
4963 if (bufqi->qdepth_no_thresh > bufqi->high_water_mark)
4964 bufqi->high_water_mark = bufqi->qdepth_no_thresh;
4965}
4966
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004967static QDF_STATUS ol_txrx_enqueue_rx_frames(
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304968 struct ol_txrx_peer_t *peer,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004969 struct ol_txrx_cached_bufq_t *bufqi,
4970 qdf_nbuf_t rx_buf_list)
4971{
4972 struct ol_rx_cached_buf *cache_buf;
4973 qdf_nbuf_t buf, next_buf;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004974 static uint32_t count;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004975
4976 if ((count++ & OL_TXRX_PRINT_RATE_LIMIT_THRESH) == 0)
4977 ol_txrx_info_high(
4978 "Data on the peer before it is registered bufq->curr %d bufq->drops %d",
4979 bufqi->curr, bufqi->dropped);
4980
4981 qdf_spin_lock_bh(&bufqi->bufq_lock);
4982 if (bufqi->curr >= bufqi->thresh) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304983 ol_txrx_drop_frames(bufqi, rx_buf_list);
4984 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4985 return QDF_STATUS_E_FAULT;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004986 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004987 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4988
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004989 buf = rx_buf_list;
4990 while (buf) {
4991 next_buf = qdf_nbuf_queue_next(buf);
4992 cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
4993 if (!cache_buf) {
4994 ol_txrx_err(
4995 "Failed to allocate buf to cache the rx frames");
4996 qdf_nbuf_free(buf);
4997 } else {
4998 /* Add NULL terminator */
4999 qdf_nbuf_set_next(buf, NULL);
5000 cache_buf->buf = buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305001 if (peer && peer->valid) {
5002 qdf_spin_lock_bh(&bufqi->bufq_lock);
5003 list_add_tail(&cache_buf->list,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005004 &bufqi->cached_bufq);
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305005 bufqi->curr++;
5006 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5007 } else {
5008 qdf_mem_free(cache_buf);
5009 rx_buf_list = buf;
5010 qdf_nbuf_set_next(rx_buf_list, next_buf);
5011 qdf_spin_lock_bh(&bufqi->bufq_lock);
5012 ol_txrx_drop_frames(bufqi, rx_buf_list);
5013 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5014 return QDF_STATUS_E_FAULT;
5015 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005016 }
5017 buf = next_buf;
5018 }
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305019 return QDF_STATUS_SUCCESS;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005020}
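/*
 * Worked example (illustrative, assuming a chain A -> B -> C in
 * rx_buf_list): if the peer is torn down while B is being cached, A has
 * already been wrapped in an ol_rx_cached_buf and appended to
 * bufqi->cached_bufq; for B the "peer && peer->valid" check fails, so the
 * cache wrapper is freed, B is re-linked in front of C
 * (rx_buf_list = B, B->next = C), and the remaining B -> C chain is
 * dropped via ol_txrx_drop_frames(), which updates bufqi->dropped and the
 * high-water-mark accounting.
 */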
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005021/**
5022 * ol_rx_data_process() - process rx frame
5023 * @peer: peer
5024 * @rx_buf_list: rx buffer list
5025 *
5026 * Return: None
5027 */
5028void ol_rx_data_process(struct ol_txrx_peer_t *peer,
Nirav Shahcbc6d722016-03-01 16:24:53 +05305029 qdf_nbuf_t rx_buf_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005030{
Yun Parkeaea8632017-04-09 09:53:45 -07005031 /*
5032 * The firmware's data-path-active response is handled via the shim
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005033 * RX thread; the T2H message runs in SIRQ context, and IPA kernel
Yun Parkeaea8632017-04-09 09:53:45 -07005034 * module APIs must not be called from SIRQ context.
5035 */
Dhanashri Atre182b0272016-02-17 15:35:07 -08005036 ol_txrx_rx_fp data_rx = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05305037 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005038 uint8_t drop_count;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005039
5040 if ((!peer) || (!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305041 ol_txrx_err("peer/pdev is NULL");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005042 goto drop_rx_buf;
5043 }
5044
Dhanashri Atre182b0272016-02-17 15:35:07 -08005045 qdf_assert(peer->vdev);
5046
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305047 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005048 if (peer->state >= OL_TXRX_PEER_STATE_CONN)
Dhanashri Atre182b0272016-02-17 15:35:07 -08005049 data_rx = peer->vdev->rx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305050 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005051
5052 /*
5053 * If data frames arrive from the peer before it is registered
5054 * for data service, enqueue them on the pending queue; they will
5055 * be flushed to HDD once that station is registered.
5056 */
5057 if (!data_rx) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305058 if (ol_txrx_enqueue_rx_frames(peer, &peer->bufq_info,
5059 rx_buf_list)
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005060 != QDF_STATUS_SUCCESS)
Poddar, Siddarth07eebf32017-04-19 12:40:26 +05305061 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5062 "%s: failed to enqueue rx frm to cached_bufq",
5063 __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005064 } else {
5065#ifdef QCA_CONFIG_SMP
5066 /*
5067 * If the kernel is SMP, schedule the rx thread to make
5068 * better use of multiple cores.
5069 */
5070 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
Nirav Shah36a87bf2016-02-22 12:38:46 +05305071 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005072 } else {
5073 p_cds_sched_context sched_ctx =
5074 get_cds_sched_ctxt();
5075 struct cds_ol_rx_pkt *pkt;
5076
5077 if (unlikely(!sched_ctx))
5078 goto drop_rx_buf;
5079
5080 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
5081 if (!pkt) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05305082 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305083 "No available Rx message buffer");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005084 goto drop_rx_buf;
5085 }
5086 pkt->callback = (cds_ol_rx_thread_cb)
5087 ol_rx_data_cb;
Nirav Shah36a87bf2016-02-22 12:38:46 +05305088 pkt->context = (void *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005089 pkt->Rxpkt = (void *)rx_buf_list;
5090 pkt->staId = peer->local_id;
5091 cds_indicate_rxpkt(sched_ctx, pkt);
5092 }
5093#else /* QCA_CONFIG_SMP */
Nirav Shah36a87bf2016-02-22 12:38:46 +05305094 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005095#endif /* QCA_CONFIG_SMP */
5096 }
5097
5098 return;
5099
5100drop_rx_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005101 drop_count = ol_txrx_drop_nbuf_list(rx_buf_list);
5102 ol_txrx_info_high("Dropped rx packets %u", drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005103}
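/*
 * Call-flow sketch (illustrative): on SMP builds with the rx thread
 * enabled, the frame chain is not delivered inline; a cds_ol_rx_pkt is
 * filled as above (callback = ol_rx_data_cb, context = pdev,
 * Rxpkt = rx_buf_list, staId = peer->local_id) and handed to the rx
 * thread via cds_indicate_rxpkt(). The rx thread later invokes
 * ol_rx_data_cb(), which re-validates the peer from staId before calling
 * the vdev's registered rx function.
 */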
5104
5105/**
5106 * ol_txrx_register_peer() - register peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005107 * @sta_desc: sta descriptor
5108 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05305109 * Return: QDF Status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005110 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005111static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005112{
5113 struct ol_txrx_peer_t *peer;
Anurag Chouhan6d760662016-02-20 16:05:43 +05305114 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005115 union ol_txrx_peer_update_param_t param;
5116 struct privacy_exemption privacy_filter;
5117
5118 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305119 ol_txrx_err("Pdev is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305120 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005121 }
5122
5123 if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305124 ol_txrx_err("Invalid sta id :%d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005125 sta_desc->sta_id);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305126 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005127 }
5128
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005129 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
5130 sta_desc->sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005131 if (!peer)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305132 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005133
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305134 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005135 peer->state = OL_TXRX_PEER_STATE_CONN;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305136 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005137
5138 param.qos_capable = sta_desc->is_qos_enabled;
5139 ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
5140 ol_txrx_peer_update_qos_capable);
5141
5142 if (sta_desc->is_wapi_supported) {
5143 /*Privacy filter to accept unencrypted WAI frames */
5144 privacy_filter.ether_type = ETHERTYPE_WAI;
5145 privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
5146 privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
5147 ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
5148 }
5149
5150 ol_txrx_flush_rx_frames(peer, 0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305151 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005152}
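/*
 * Illustrative usage sketch (sta_id and the error handling are
 * caller-specific placeholders): a caller such as HDD typically fills an
 * ol_txrx_desc_type once the station is added and registers it, e.g.
 *
 *	struct ol_txrx_desc_type sta_desc = {0};
 *
 *	sta_desc.sta_id = sta_id;
 *	sta_desc.is_qos_enabled = true;
 *	sta_desc.is_wapi_supported = false;
 *	if (ol_txrx_register_peer(&sta_desc) != QDF_STATUS_SUCCESS)
 *		...handle the error...
 *
 * External callers reach this static function through the cdp peer ops
 * (.register_peer). Registration moves the peer to
 * OL_TXRX_PEER_STATE_CONN and flushes any rx frames cached before
 * registration.
 */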
5153
5154/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005155 * ol_txrx_register_ocb_peer - Function to register the OCB peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005156 * @mac_addr: MAC address of the self peer
5157 * @peer_id: Pointer to the peer ID
5158 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305159 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005160 */
Jeff Johnson382bce02017-09-01 14:21:07 -07005161static QDF_STATUS ol_txrx_register_ocb_peer(uint8_t *mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005162 uint8_t *peer_id)
5163{
5164 ol_txrx_pdev_handle pdev;
5165 ol_txrx_peer_handle peer;
5166
Anurag Chouhan6d760662016-02-20 16:05:43 +05305167 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005168 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305169 ol_txrx_err("%s: Unable to find pdev!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005170 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305171 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005172 }
5173
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005174 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
5175 mac_addr, peer_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005176 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305177 ol_txrx_err("%s: Unable to find OCB peer!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005178 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305179 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005180 }
5181
5182 ol_txrx_set_ocb_peer(pdev, peer);
5183
5184 /* Set peer state to connected */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005185 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005186 OL_TXRX_PEER_STATE_AUTH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005187
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305188 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005189}
5190
5191/**
5192 * ol_txrx_set_ocb_peer - Function to store the OCB peer
5193 * @pdev: Handle to the HTT instance
5194 * @peer: Pointer to the peer
5195 */
5196void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
5197 struct ol_txrx_peer_t *peer)
5198{
5199 if (pdev == NULL)
5200 return;
5201
5202 pdev->ocb_peer = peer;
5203 pdev->ocb_peer_valid = (NULL != peer);
5204}
5205
5206/**
5207 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
5208 * @pdev: Handle to the HTT instance
5209 * @peer: Pointer to the returned peer
5210 *
5211 * Return: true if the peer is valid, false if not
5212 */
5213bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
5214 struct ol_txrx_peer_t **peer)
5215{
5216 int rc;
5217
5218 if ((pdev == NULL) || (peer == NULL)) {
5219 rc = false;
5220 goto exit;
5221 }
5222
5223 if (pdev->ocb_peer_valid) {
5224 *peer = pdev->ocb_peer;
5225 rc = true;
5226 } else {
5227 rc = false;
5228 }
5229
5230exit:
5231 return rc;
5232}
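/*
 * Illustrative usage sketch (self_mac and pdev are caller-provided
 * placeholders): the OCB self peer is registered once and then looked up
 * by consumers, e.g.
 *
 *	uint8_t peer_id;
 *	struct ol_txrx_peer_t *ocb_peer;
 *
 *	if (ol_txrx_register_ocb_peer(self_mac, &peer_id) ==
 *	    QDF_STATUS_SUCCESS &&
 *	    ol_txrx_get_ocb_peer(pdev, &ocb_peer))
 *		...use ocb_peer...
 *
 * ol_txrx_register_ocb_peer() stores the peer with ol_txrx_set_ocb_peer()
 * and moves it to the authenticated state; external callers reach it
 * through the cdp peer ops (.register_ocb_peer).
 */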
5233
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005234#ifdef RECEIVE_OFFLOAD
5235/**
5236 * ol_txrx_offld_flush_handler() - offld flush handler
5237 * @context: dev handle
5238 * @rxpkt: rx data
5239 * @staid: station id
5240 *
5241 * This function handles an offld flush indication.
5242 * If the rx thread is enabled, it is invoked from the rx
5243 * thread; otherwise it is called in the tasklet context.
5244 *
5245 * Return: none
5246 */
5247static void ol_txrx_offld_flush_handler(void *context,
5248 void *rxpkt,
5249 uint16_t staid)
5250{
5251 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5252
5253 if (qdf_unlikely(!pdev)) {
5254 ol_txrx_err("Invalid context");
5255 qdf_assert(0);
5256 return;
5257 }
5258
5259 if (pdev->offld_flush_cb)
5260 pdev->offld_flush_cb(context);
5261 else
5262 ol_txrx_err("offld_flush_cb NULL");
5263}
5264
5265/**
5266 * ol_txrx_offld_flush() - offld flush callback
5267 * @data: opaque data pointer
5268 *
5269 * This is the callback registered with CE to trigger
5270 * an offld flush
5271 *
5272 * Return: none
5273 */
5274static void ol_txrx_offld_flush(void *data)
5275{
5276 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
5277 struct cds_ol_rx_pkt *pkt;
5278 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5279
5280 if (qdf_unlikely(!sched_ctx))
5281 return;
5282
	if (qdf_unlikely(!pdev)) {
		ol_txrx_err("TXRX module context is NULL");
		return;
	}

5283	if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
5284 ol_txrx_offld_flush_handler(data, NULL, 0);
5285 } else {
5286 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
5287 if (qdf_unlikely(!pkt)) {
5288 ol_txrx_err("Not able to allocate context");
5289 return;
5290 }
5291
5292 pkt->callback = ol_txrx_offld_flush_handler;
5293 pkt->context = data;
5294 pkt->Rxpkt = NULL;
5295 pkt->staId = 0;
5296 cds_indicate_rxpkt(sched_ctx, pkt);
5297 }
5298}
5299
5300/**
5301 * ol_register_offld_flush_cb() - register the offld flush callback
5302 * @offld_flush_cb: flush callback function
5304 *
5305 * Store the offld flush callback provided and in turn
5306 * register OL's offld flush handler with CE
5307 *
5308 * Return: none
5309 */
5310static void ol_register_offld_flush_cb(void (offld_flush_cb)(void *))
5311{
5312 struct hif_opaque_softc *hif_device;
5313 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5314
5315 if (pdev == NULL) {
5316 ol_txrx_err("pdev NULL!");
5317 TXRX_ASSERT2(0);
5318 goto out;
5319 }
5320 if (pdev->offld_flush_cb != NULL) {
5321 ol_txrx_info("offld already initialised");
5322 if (pdev->offld_flush_cb != offld_flush_cb) {
5323 ol_txrx_err(
5324 "offld_flush_cb differs from the previously registered callback");
5325 TXRX_ASSERT2(0);
5326 goto out;
5327 }
5328 goto out;
5329 }
5330 pdev->offld_flush_cb = offld_flush_cb;
5331 hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5332
5333 if (qdf_unlikely(hif_device == NULL)) {
5334 ol_txrx_err("hif_device NULL!");
5335 qdf_assert(0);
5336 goto out;
5337 }
5338
5339 hif_offld_flush_cb_register(hif_device, ol_txrx_offld_flush);
5340
5341out:
5342 return;
5343}
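/*
 * Call-flow sketch (illustrative; my_flush_cb is a placeholder name):
 *
 *	ol_register_offld_flush_cb(my_flush_cb)
 *	    -> pdev->offld_flush_cb = my_flush_cb
 *	    -> hif_offld_flush_cb_register(hif_device, ol_txrx_offld_flush)
 *
 * At runtime CE invokes ol_txrx_offld_flush(data); depending on whether
 * the rx thread is enabled, this either calls
 * ol_txrx_offld_flush_handler() inline or queues a cds_ol_rx_pkt to the
 * rx thread, and the handler finally invokes pdev->offld_flush_cb(data).
 */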
5344
5345/**
5346 * ol_deregister_offld_flush_cb() - deregister the offld flush callback
5347 *
5348 * Remove the offld flush callback provided and in turn
5349 * deregister OL's offld flush handler with CE
5350 *
5351 * Return: none
5352 */
5353static void ol_deregister_offld_flush_cb(void)
5354{
5355 struct hif_opaque_softc *hif_device;
5356 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5357
5358 if (pdev == NULL) {
5359 ol_txrx_err("pdev NULL!");
5360 return;
5361 }
5362 hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5363
5364 if (qdf_unlikely(hif_device == NULL)) {
5365 ol_txrx_err("hif_device NULL!");
5366 qdf_assert(0);
5367 return;
5368 }
5369
5370 hif_offld_flush_cb_deregister(hif_device);
5371
5372 pdev->offld_flush_cb = NULL;
5373}
5374#endif /* RECEIVE_OFFLOAD */
5375
Poddar, Siddarth34872782017-08-10 14:08:51 +05305376/**
5377 * ol_register_data_stall_detect_cb() - register data stall callback
5378 * @data_stall_detect_callback: data stall callback function
5379 *
5380 *
5381 * Return: QDF_STATUS Enumeration
5382 */
5383static QDF_STATUS ol_register_data_stall_detect_cb(
5384 data_stall_detect_cb data_stall_detect_callback)
5385{
5386 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5387
5388 if (pdev == NULL) {
5389 ol_txrx_err("%s: pdev NULL!", __func__);
5390 return QDF_STATUS_E_INVAL;
5391 }
5392 pdev->data_stall_detect_callback = data_stall_detect_callback;
5393 return QDF_STATUS_SUCCESS;
5394}
5395
5396/**
5397 * ol_deregister_data_stall_detect_cb() - de-register data stall callback
5398 * @data_stall_detect_callback: data stall callback function
5399 *
5400 *
5401 * Return: QDF_STATUS Enumeration
5402 */
5403static QDF_STATUS ol_deregister_data_stall_detect_cb(
5404 data_stall_detect_cb data_stall_detect_callback)
5405{
5406 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5407
5408 if (pdev == NULL) {
5409 ol_txrx_err("%s: pdev NULL!", __func__);
5410 return QDF_STATUS_E_INVAL;
5411 }
5412 pdev->data_stall_detect_callback = NULL;
5413 return QDF_STATUS_SUCCESS;
5414}
5415
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305416/**
5417 * ol_txrx_post_data_stall_event() - post data stall event
5418 * @indicator: Module triggering data stall
5419 * @data_stall_type: data stall event type
5420 * @pdev_id: pdev id
5421 * @vdev_id_bitmap: vdev id bitmap
5422 * @recovery_type: data stall recovery type
5423 *
5424 * Return: None
5425 */
5426static void ol_txrx_post_data_stall_event(
5427 enum data_stall_log_event_indicator indicator,
5428 enum data_stall_log_event_type data_stall_type,
5429 uint32_t pdev_id, uint32_t vdev_id_bitmap,
5430 enum data_stall_log_recovery_type recovery_type)
5431{
5432 struct scheduler_msg msg = {0};
5433 QDF_STATUS status;
5434 struct data_stall_event_info *data_stall_info;
5435 ol_txrx_pdev_handle pdev;
5436
5437 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5438 if (!pdev) {
5439 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5440 "%s: pdev is NULL.", __func__);
5441 return;
5442 }
5443 data_stall_info = qdf_mem_malloc(sizeof(*data_stall_info));
5444 if (!data_stall_info) {
5445 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5446 "%s: data_stall_info is NULL.", __func__);
5447 return;
5448 }
5449 data_stall_info->indicator = indicator;
5450 data_stall_info->data_stall_type = data_stall_type;
5451 data_stall_info->vdev_id_bitmap = vdev_id_bitmap;
5452 data_stall_info->pdev_id = pdev_id;
5453 data_stall_info->recovery_type = recovery_type;
5454
Poddar, Siddarthb9047592017-10-05 15:48:28 +05305455 if (data_stall_info->data_stall_type ==
5456 DATA_STALL_LOG_FW_RX_REFILL_FAILED)
5457 htt_log_rx_ring_info(pdev->htt_pdev);
5458
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305459 sys_build_message_header(SYS_MSG_ID_DATA_STALL_MSG, &msg);
5460 /* Save callback and data */
5461 msg.callback = pdev->data_stall_detect_callback;
5462 msg.bodyptr = data_stall_info;
5463 msg.bodyval = 0;
5464
5465 status = scheduler_post_msg(QDF_MODULE_ID_SYS, &msg);
5466
5467 if (status != QDF_STATUS_SUCCESS) {
5468 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5469 "%s: failed to post data stall msg to SYS", __func__);
5470 qdf_mem_free(data_stall_info);
5471 }
5472}
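/*
 * Illustrative usage sketch (the argument values are placeholders for the
 * caller's enums): a module that detects a stall posts an event through
 * the cdp misc op .txrx_post_data_stall_event, which resolves to the
 * function above, e.g.
 *
 *	ol_txrx_post_data_stall_event(indicator, data_stall_type,
 *				      pdev_id, vdev_id_bitmap,
 *				      recovery_type);
 *
 * The only concrete value referenced above is
 * DATA_STALL_LOG_FW_RX_REFILL_FAILED, which additionally triggers an rx
 * ring dump via htt_log_rx_ring_info(). The event is wrapped in a
 * data_stall_event_info and posted to the SYS scheduler queue, where the
 * callback registered through ol_register_data_stall_detect_cb() consumes
 * it.
 */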
5473
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305474void
5475ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
5476{
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07005477 qdf_print("%s: Pkt: VA 0x%pK PA 0x%llx len %d\n", __func__,
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305478 qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
5479 print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
5480 qdf_nbuf_data(nbuf), len, true);
5481}
5482
Dhanashri Atre12a08392016-02-17 13:10:34 -08005483/**
5484 * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
5485 * @vdev_id: vdev_id
5486 *
5487 * Return: vdev handle
5488 * NULL if not found.
5489 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005490struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
Dhanashri Atre12a08392016-02-17 13:10:34 -08005491{
5492 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5493 ol_txrx_vdev_handle vdev = NULL;
5494
5495 if (qdf_unlikely(!pdev))
5496 return NULL;
5497
5498 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5499 if (vdev->vdev_id == vdev_id)
5500 break;
5501 }
5502
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005503 return (struct cdp_vdev *)vdev;
Dhanashri Atre12a08392016-02-17 13:10:34 -08005504}
Nirav Shah2e583a02016-04-30 14:06:12 +05305505
5506/**
5507 * ol_txrx_set_wisa_mode() - set wisa mode
5508 * @vdev: vdev handle
5509 * @enable: enable flag
5510 *
5511 * Return: QDF STATUS
5512 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005513static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_vdev *pvdev, bool enable)
Nirav Shah2e583a02016-04-30 14:06:12 +05305514{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005515 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Leo Chang98726762016-10-28 11:07:18 -07005516
Nirav Shah2e583a02016-04-30 14:06:12 +05305517 if (!vdev)
5518 return QDF_STATUS_E_INVAL;
5519
5520 vdev->is_wisa_mode_enable = enable;
5521 return QDF_STATUS_SUCCESS;
5522}
Leo Chang98726762016-10-28 11:07:18 -07005523
5524/**
5525 * ol_txrx_get_vdev_id() - get interface id from interface context
5526 * @pvdev: vdev handle
5527 *
5528 * Return: virtual interface id
5529 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005530static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
Leo Chang98726762016-10-28 11:07:18 -07005531{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005532 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07005533
Leo Chang98726762016-10-28 11:07:18 -07005534 return vdev->vdev_id;
5535}
5536
5537/**
5538 * ol_txrx_last_assoc_received() - get time of last assoc received
5539 * @ppeer: peer handle
5540 *
5541 * Return: pointer to the time of the last assoc received
5542 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005543static qdf_time_t *ol_txrx_last_assoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005544{
5545 ol_txrx_peer_handle peer = ppeer;
5546
5547 return &peer->last_assoc_rcvd;
5548}
5549
5550/**
5551 * ol_txrx_last_disassoc_received() - get time of last disassoc received
5552 * @ppeer: peer handle
5553 *
5554 * Return: pointer to the time of the last disassoc received
5555 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005556static qdf_time_t *ol_txrx_last_disassoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005557{
5558 ol_txrx_peer_handle peer = ppeer;
5559
5560 return &peer->last_disassoc_rcvd;
5561}
5562
5563/**
5564 * ol_txrx_last_deauth_received() - get time of last deauth received
5565 * @ppeer: peer handle
5566 *
5567 * Return: pointer to the time of the last deauth received
5568 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005569static qdf_time_t *ol_txrx_last_deauth_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005570{
5571 ol_txrx_peer_handle peer = ppeer;
5572
5573 return &peer->last_deauth_rcvd;
5574}
5575
5576/**
5577 * ol_txrx_soc_attach_target() - attach soc target
5578 * @soc: soc handle
5579 *
5580 * MCL legacy OL does nothing here
5581 *
5582 * Return: 0
5583 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005584static int ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
Leo Chang98726762016-10-28 11:07:18 -07005585{
5586 /* MCL legacy OL does nothing here */
5587 return 0;
5588}
5589
5590/**
5591 * ol_txrx_soc_detach() - detach soc target
5592 * @soc: soc handle
5593 *
5594 * Frees the soc structure allocated by ol_txrx_soc_attach()
5595 *
5596 * Return: none
5597 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005598static void ol_txrx_soc_detach(void *soc)
Leo Chang98726762016-10-28 11:07:18 -07005599{
Venkata Sharath Chandra Manchala0c2eece2017-03-09 17:30:52 -08005600 qdf_mem_free(soc);
Leo Chang98726762016-10-28 11:07:18 -07005601}
5602
5603/**
5604 * ol_txrx_pkt_log_con_service() - connect packet log service
5605 * @ppdev: physical device handle
5606 * @scn: device context
5607 *
5608 * Return: none
5609 */
Nirav Shahbb8e47c2018-05-17 16:56:41 +05305610#ifdef REMOVE_PKT_LOG
5611static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
5612{
5613}
5614#else
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005615static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
Leo Chang98726762016-10-28 11:07:18 -07005616{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005617 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07005618
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005619 htt_pkt_log_init((struct cdp_pdev *)pdev, scn);
Leo Chang98726762016-10-28 11:07:18 -07005620 pktlog_htc_attach();
5621}
Nirav Shahbb8e47c2018-05-17 16:56:41 +05305622#endif
Leo Chang98726762016-10-28 11:07:18 -07005623
5624/* OL wrapper functions for CDP abstraction */
5625/**
5626 * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
5627 * @peer: peer handle
5628 * @drop: rx packets drop or deliver
5629 *
5630 * Return: none
5631 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005632static void ol_txrx_wrapper_flush_rx_frames(void *peer, bool drop)
Leo Chang98726762016-10-28 11:07:18 -07005633{
5634 ol_txrx_flush_rx_frames((ol_txrx_peer_handle)peer, drop);
5635}
5636
5637/**
5638 * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
5639 * @ppdev: pdev handle
5640 * @vdev_id: interface id
5641 *
5642 * Return: virtual interface instance
5643 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005644static
5645struct cdp_vdev *ol_txrx_wrapper_get_vdev_from_vdev_id(struct cdp_pdev *ppdev,
5646 uint8_t vdev_id)
Leo Chang98726762016-10-28 11:07:18 -07005647{
5648 return ol_txrx_get_vdev_from_vdev_id(vdev_id);
5649}
5650
5651/**
5652 * ol_txrx_wrapper_register_peer() - register peer
5653 * @pdev: pdev handle
5654 * @sta_desc: peer description
5655 *
5656 * Return: QDF STATUS
5657 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005658static QDF_STATUS ol_txrx_wrapper_register_peer(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005659 struct ol_txrx_desc_type *sta_desc)
5660{
5661 return ol_txrx_register_peer(sta_desc);
5662}
5663
5664/**
5665 * ol_txrx_wrapper_peer_find_by_local_id() - Find a txrx peer handle
5666 * @pdev - the data physical device object
5667 * @local_peer_id - the ID txrx assigned locally to the peer in question
5668 *
5669 * The control SW typically uses the txrx peer handle to refer to the peer.
5670 * In unusual circumstances, if it is infeasible for the control SW to
5671 * maintain the txrx peer handle but it can maintain a small integer local
5672 * peer ID, this function allows the peer handle to be retrieved, based on
5673 * the local peer ID.
5674 *
5675 * @return handle to the txrx peer object
5676 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005677static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005678ol_txrx_wrapper_peer_find_by_local_id(struct cdp_pdev *pdev,
5679 uint8_t local_peer_id)
Leo Chang98726762016-10-28 11:07:18 -07005680{
5681 return (void *)ol_txrx_peer_find_by_local_id(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005682 pdev, local_peer_id);
Leo Chang98726762016-10-28 11:07:18 -07005683}
5684
5685/**
5686 * ol_txrx_wrapper_cfg_is_high_latency() - device is high or low latency device
5687 * @pdev: pdev handle
5688 *
5689 * Return: 1 high latency bus
5690 * 0 low latency bus
5691 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005692static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
Leo Chang98726762016-10-28 11:07:18 -07005693{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005694 return ol_cfg_is_high_latency(cfg_pdev);
Leo Chang98726762016-10-28 11:07:18 -07005695}
5696
5697/**
5698 * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
5699 * @pdev - data pdev handle
5700 * @peer_mac - MAC address of the peer whose state has changed
5701 * @state - the new state of the peer
5701 *
5702 * Specify the peer's authentication state (none, connected, authenticated)
5703 * to allow the data SW to determine whether to filter out invalid data frames.
5704 * (In the "connected" state, where security is enabled, but authentication
5705 * has not completed, tx and rx data frames other than EAPOL or WAPI should
5706 * be discarded.)
5707 * This function is only relevant for systems in which the tx and rx filtering
5708 * are done in the host rather than in the target.
5709 *
5710 * Return: QDF Status
5711 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005712static QDF_STATUS ol_txrx_wrapper_peer_state_update(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005713 uint8_t *peer_mac, enum ol_txrx_peer_state state)
5714{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005715 return ol_txrx_peer_state_update(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005716 peer_mac, state);
5717}
5718
5719/**
5720 * ol_txrx_wrapper_find_peer_by_addr() - find peer instance by address
5721 * @pdev: pdev handle
Jeff Johnson37df7c32018-05-10 12:30:35 -07005722 * @peer_addr: peer address want to find
Leo Chang98726762016-10-28 11:07:18 -07005723 * @peer_id: peer id
5724 *
5725 * Return: peer instance pointer
5726 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005727static void *ol_txrx_wrapper_find_peer_by_addr(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005728 uint8_t *peer_addr, uint8_t *peer_id)
5729{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005730 return ol_txrx_find_peer_by_addr(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005731 peer_addr, peer_id);
5732}
5733
5734/**
Mohit Khannab7bec722017-11-10 11:43:44 -08005735 * ol_txrx_wrapper_peer_get_ref_by_addr() - get peer reference by address
5736 * @pdev: pdev handle
5737 * @peer_addr: peer address we want to find
5738 * @peer_id: peer id
5739 * @debug_id: peer debug id for tracking
5740 *
5741 * Return: peer instance pointer
5742 */
5743static void *
5744ol_txrx_wrapper_peer_get_ref_by_addr(struct cdp_pdev *pdev,
5745 u8 *peer_addr, uint8_t *peer_id,
5746 enum peer_debug_id_type debug_id)
5747{
5748 return ol_txrx_peer_get_ref_by_addr((ol_txrx_pdev_handle)pdev,
5749 peer_addr, peer_id, debug_id);
5750}
5751
5752/**
5753 * ol_txrx_wrapper_peer_release_ref() - release peer reference
5754 * @peer: peer handle
5755 * @debug_id: peer debug id for tracking
5756 *
5757 * Release peer ref acquired by peer get ref api
5758 *
5759 * Return: void
5760 */
5761static void ol_txrx_wrapper_peer_release_ref(void *peer,
5762 enum peer_debug_id_type debug_id)
5763{
5764 ol_txrx_peer_release_ref(peer, debug_id);
5765}
5766
5767/**
Leo Chang98726762016-10-28 11:07:18 -07005768 * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
5769 * @cfg_ctx: cfg context
5770 * @cfg_param: cfg parameters
5771 *
5772 * Return: none
5773 */
Jeff Johnsonffa9afc2016-12-19 15:34:41 -08005774static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005775ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
5776 void *cfg_param)
Leo Chang98726762016-10-28 11:07:18 -07005777{
5778 return ol_tx_set_flow_control_parameters(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005779 cfg_pdev,
Leo Chang98726762016-10-28 11:07:18 -07005780 (struct txrx_pdev_cfg_param_t *)cfg_param);
5781}
5782
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08005783#ifdef WDI_EVENT_ENABLE
5784void *ol_get_pldev(struct cdp_pdev *txrx_pdev)
5785{
5786 struct ol_txrx_pdev_t *pdev =
5787 (struct ol_txrx_pdev_t *)txrx_pdev;
5788 if (pdev != NULL)
5789 return pdev->pl_dev;
5790
5791 return NULL;
5792}
5793#endif
5794
Leo Chang98726762016-10-28 11:07:18 -07005795static struct cdp_cmn_ops ol_ops_cmn = {
5796 .txrx_soc_attach_target = ol_txrx_soc_attach_target,
5797 .txrx_vdev_attach = ol_txrx_vdev_attach,
5798 .txrx_vdev_detach = ol_txrx_vdev_detach,
5799 .txrx_pdev_attach = ol_txrx_pdev_attach,
5800 .txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
5801 .txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05305802 .txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
Leo Chang98726762016-10-28 11:07:18 -07005803 .txrx_pdev_detach = ol_txrx_pdev_detach,
Dhanashri Atre272fd232016-11-10 16:20:46 -08005804 .txrx_peer_create = ol_txrx_peer_attach,
5805 .txrx_peer_setup = NULL,
5806 .txrx_peer_teardown = NULL,
5807 .txrx_peer_delete = ol_txrx_peer_detach,
Leo Chang98726762016-10-28 11:07:18 -07005808 .txrx_vdev_register = ol_txrx_vdev_register,
5809 .txrx_soc_detach = ol_txrx_soc_detach,
5810 .txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
5811 .txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
5812 .txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
Krishna Kumaar Natarajan5fb9ac12016-12-06 14:28:35 -08005813 .txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
Leo Chang98726762016-10-28 11:07:18 -07005814 .txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
5815 .txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
5816 .txrx_get_tx_pending = ol_txrx_get_tx_pending,
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07005817 .flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005818 .txrx_fw_stats_get = ol_txrx_fw_stats_get,
5819 .display_stats = ol_txrx_display_stats,
Leo Chang98726762016-10-28 11:07:18 -07005820 /* TODO: Add other functions */
5821};
5822
5823static struct cdp_misc_ops ol_ops_misc = {
5824 .set_ibss_vdev_heart_beat_timer =
5825 ol_txrx_set_ibss_vdev_heart_beat_timer,
5826#ifdef CONFIG_HL_SUPPORT
5827 .set_wmm_param = ol_txrx_set_wmm_param,
5828#endif /* CONFIG_HL_SUPPORT */
5829 .bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
5830 .bad_peer_txctl_update_threshold =
5831 ol_txrx_bad_peer_txctl_update_threshold,
5832 .hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
5833 .tx_non_std = ol_tx_non_std,
5834 .get_vdev_id = ol_txrx_get_vdev_id,
Alok Kumar75355aa2018-03-19 17:32:58 +05305835 .get_tx_ack_stats = ol_txrx_get_tx_ack_stats,
Leo Chang98726762016-10-28 11:07:18 -07005836 .set_wisa_mode = ol_txrx_set_wisa_mode,
Poddar, Siddarth34872782017-08-10 14:08:51 +05305837 .txrx_data_stall_cb_register = ol_register_data_stall_detect_cb,
5838 .txrx_data_stall_cb_deregister = ol_deregister_data_stall_detect_cb,
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305839 .txrx_post_data_stall_event = ol_txrx_post_data_stall_event,
Leo Chang98726762016-10-28 11:07:18 -07005840#ifdef FEATURE_RUNTIME_PM
5841 .runtime_suspend = ol_txrx_runtime_suspend,
5842 .runtime_resume = ol_txrx_runtime_resume,
5843#endif /* FEATURE_RUNTIME_PM */
5844 .get_opmode = ol_txrx_get_opmode,
5845 .mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
5846 .update_mac_id = ol_txrx_update_mac_id,
5847 .flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
5848 .get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
5849 .pkt_log_init = htt_pkt_log_init,
5850 .pkt_log_con_service = ol_txrx_pkt_log_con_service
5851};
5852
5853static struct cdp_flowctl_ops ol_ops_flowctl = {
5854#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5855 .register_pause_cb = ol_txrx_register_pause_cb,
5856 .set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07005857 .dump_flow_pool_info = ol_tx_dump_flow_pool_info,
Leo Chang98726762016-10-28 11:07:18 -07005858#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
5859};
5860
5861static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
5862#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
5863 .register_tx_flow_control = ol_txrx_register_tx_flow_control,
5864 .deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
5865 .flow_control_cb = ol_txrx_flow_control_cb,
5866 .get_tx_resource = ol_txrx_get_tx_resource,
5867 .ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
5868 .vdev_flush = ol_txrx_vdev_flush,
5869 .vdev_pause = ol_txrx_vdev_pause,
5870 .vdev_unpause = ol_txrx_vdev_unpause
5871#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
5872};
5873
Leo Chang98726762016-10-28 11:07:18 -07005874#ifdef IPA_OFFLOAD
Yun Parkb4f591d2017-03-29 15:51:01 -07005875static struct cdp_ipa_ops ol_ops_ipa = {
Leo Chang98726762016-10-28 11:07:18 -07005876 .ipa_get_resource = ol_txrx_ipa_uc_get_resource,
5877 .ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
5878 .ipa_set_active = ol_txrx_ipa_uc_set_active,
5879 .ipa_op_response = ol_txrx_ipa_uc_op_response,
5880 .ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
5881 .ipa_get_stat = ol_txrx_ipa_uc_get_stat,
5882 .ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
Yun Park637d6482016-10-05 10:51:33 -07005883 .ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base,
Yun Parkb4f591d2017-03-29 15:51:01 -07005884 .ipa_enable_autonomy = ol_txrx_ipa_enable_autonomy,
5885 .ipa_disable_autonomy = ol_txrx_ipa_disable_autonomy,
5886 .ipa_setup = ol_txrx_ipa_setup,
5887 .ipa_cleanup = ol_txrx_ipa_cleanup,
5888 .ipa_setup_iface = ol_txrx_ipa_setup_iface,
5889 .ipa_cleanup_iface = ol_txrx_ipa_cleanup_iface,
5890 .ipa_enable_pipes = ol_txrx_ipa_enable_pipes,
5891 .ipa_disable_pipes = ol_txrx_ipa_disable_pipes,
5892 .ipa_set_perf_level = ol_txrx_ipa_set_perf_level,
5893#ifdef FEATURE_METERING
Yun Park637d6482016-10-05 10:51:33 -07005894 .ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
5895 .ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota
Yun Parkb4f591d2017-03-29 15:51:01 -07005896#endif
Leo Chang98726762016-10-28 11:07:18 -07005897};
Yun Parkb4f591d2017-03-29 15:51:01 -07005898#endif
Leo Chang98726762016-10-28 11:07:18 -07005899
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07005900#ifdef RECEIVE_OFFLOAD
5901static struct cdp_rx_offld_ops ol_rx_offld_ops = {
5902 .register_rx_offld_flush_cb = ol_register_offld_flush_cb,
5903 .deregister_rx_offld_flush_cb = ol_deregister_offld_flush_cb
5904};
5905#endif
5906
Leo Chang98726762016-10-28 11:07:18 -07005907static struct cdp_bus_ops ol_ops_bus = {
5908 .bus_suspend = ol_txrx_bus_suspend,
5909 .bus_resume = ol_txrx_bus_resume
5910};
5911
5912static struct cdp_ocb_ops ol_ops_ocb = {
5913 .set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
5914 .get_ocb_chan_info = ol_txrx_get_ocb_chan_info
5915};
5916
5917static struct cdp_throttle_ops ol_ops_throttle = {
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005918#ifdef QCA_SUPPORT_TX_THROTTLE
Leo Chang98726762016-10-28 11:07:18 -07005919 .throttle_init_period = ol_tx_throttle_init_period,
5920 .throttle_set_level = ol_tx_throttle_set_level
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005921#endif /* QCA_SUPPORT_TX_THROTTLE */
Leo Chang98726762016-10-28 11:07:18 -07005922};
5923
5924static struct cdp_mob_stats_ops ol_ops_mob_stats = {
Leo Chang98726762016-10-28 11:07:18 -07005925 .clear_stats = ol_txrx_clear_stats,
5926 .stats = ol_txrx_stats
5927};
5928
5929static struct cdp_cfg_ops ol_ops_cfg = {
5930 .set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
5931 .set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
5932 .cfg_attach = ol_pdev_cfg_attach,
5933 .vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
5934 .is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
5935 .tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
5936 .is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
5937 .set_flow_control_parameters =
5938 ol_txrx_wrapper_set_flow_control_parameters,
5939 .set_flow_steering = ol_set_cfg_flow_steering,
Yu Wang66a250b2017-07-19 11:46:40 +08005940 .set_ptp_rx_opt_enabled = ol_set_cfg_ptp_rx_opt_enabled,
Leo Chang98726762016-10-28 11:07:18 -07005941};
5942
5943static struct cdp_peer_ops ol_ops_peer = {
5944 .register_peer = ol_txrx_wrapper_register_peer,
5945 .clear_peer = ol_txrx_clear_peer,
Mohit Khannab7bec722017-11-10 11:43:44 -08005946 .peer_get_ref_by_addr = ol_txrx_wrapper_peer_get_ref_by_addr,
5947 .peer_release_ref = ol_txrx_wrapper_peer_release_ref,
Leo Chang98726762016-10-28 11:07:18 -07005948 .find_peer_by_addr = ol_txrx_wrapper_find_peer_by_addr,
5949 .find_peer_by_addr_and_vdev = ol_txrx_find_peer_by_addr_and_vdev,
5950 .local_peer_id = ol_txrx_local_peer_id,
5951 .peer_find_by_local_id = ol_txrx_wrapper_peer_find_by_local_id,
5952 .peer_state_update = ol_txrx_wrapper_peer_state_update,
5953 .get_vdevid = ol_txrx_get_vdevid,
5954 .get_vdev_by_sta_id = ol_txrx_get_vdev_by_sta_id,
5955 .register_ocb_peer = ol_txrx_register_ocb_peer,
5956 .peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
5957 .get_peer_state = ol_txrx_get_peer_state,
5958 .get_vdev_for_peer = ol_txrx_get_vdev_for_peer,
5959 .update_ibss_add_peer_num_of_vdev =
5960 ol_txrx_update_ibss_add_peer_num_of_vdev,
5961 .remove_peers_for_vdev = ol_txrx_remove_peers_for_vdev,
5962 .remove_peers_for_vdev_no_lock = ol_txrx_remove_peers_for_vdev_no_lock,
Yu Wang053d3e72017-02-08 18:48:24 +08005963#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
Leo Chang98726762016-10-28 11:07:18 -07005964 .copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
5965 .add_last_real_peer = ol_txrx_add_last_real_peer,
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005966 .is_vdev_restore_last_peer = is_vdev_restore_last_peer,
5967 .update_last_real_peer = ol_txrx_update_last_real_peer,
5968#endif /* CONFIG_HL_SUPPORT */
Leo Chang98726762016-10-28 11:07:18 -07005969 .last_assoc_received = ol_txrx_last_assoc_received,
5970 .last_disassoc_received = ol_txrx_last_disassoc_received,
5971 .last_deauth_received = ol_txrx_last_deauth_received,
Leo Chang98726762016-10-28 11:07:18 -07005972 .peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
5973};
5974
5975static struct cdp_tx_delay_ops ol_ops_delay = {
5976#ifdef QCA_COMPUTE_TX_DELAY
5977 .tx_delay = ol_tx_delay,
5978 .tx_delay_hist = ol_tx_delay_hist,
5979 .tx_packet_count = ol_tx_packet_count,
5980 .tx_set_compute_interval = ol_tx_set_compute_interval
5981#endif /* QCA_COMPUTE_TX_DELAY */
5982};
5983
5984static struct cdp_pmf_ops ol_ops_pmf = {
5985 .get_pn_info = ol_txrx_get_pn_info
5986};
5987
Leo Chang98726762016-10-28 11:07:18 -07005988static struct cdp_ctrl_ops ol_ops_ctrl = {
Hanumanth Reddy Pothula855f7ef2018-02-13 18:32:05 +05305989 .txrx_get_pldev = ol_get_pldev,
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08005990 .txrx_wdi_event_sub = wdi_event_sub,
5991 .txrx_wdi_event_unsub = wdi_event_unsub,
Leo Chang98726762016-10-28 11:07:18 -07005992};
5993
Hanumanth Reddy Pothula855f7ef2018-02-13 18:32:05 +05305994/* WIN platform specific structures */
Leo Chang98726762016-10-28 11:07:18 -07005995static struct cdp_me_ops ol_ops_me = {
5996 /* EMPTY FOR MCL */
5997};
5998
5999static struct cdp_mon_ops ol_ops_mon = {
6000 /* EMPTY FOR MCL */
6001};
6002
6003static struct cdp_host_stats_ops ol_ops_host_stats = {
6004 /* EMPTY FOR MCL */
6005};
6006
6007static struct cdp_wds_ops ol_ops_wds = {
6008 /* EMPTY FOR MCL */
6009};
6010
6011static struct cdp_raw_ops ol_ops_raw = {
6012 /* EMPTY FOR MCL */
6013};
6014
6015static struct cdp_ops ol_txrx_ops = {
6016 .cmn_drv_ops = &ol_ops_cmn,
6017 .ctrl_ops = &ol_ops_ctrl,
6018 .me_ops = &ol_ops_me,
6019 .mon_ops = &ol_ops_mon,
6020 .host_stats_ops = &ol_ops_host_stats,
6021 .wds_ops = &ol_ops_wds,
6022 .raw_ops = &ol_ops_raw,
6023 .misc_ops = &ol_ops_misc,
6024 .cfg_ops = &ol_ops_cfg,
6025 .flowctl_ops = &ol_ops_flowctl,
6026 .l_flowctl_ops = &ol_ops_l_flowctl,
Yun Parkb4f591d2017-03-29 15:51:01 -07006027#ifdef IPA_OFFLOAD
Leo Chang98726762016-10-28 11:07:18 -07006028 .ipa_ops = &ol_ops_ipa,
Yun Parkb4f591d2017-03-29 15:51:01 -07006029#endif
Manjunathappa Prakashfb5f25b2018-03-28 20:05:46 -07006030#ifdef RECEIVE_OFFLOAD
6031 .rx_offld_ops = &ol_rx_offld_ops,
6032#endif
Leo Chang98726762016-10-28 11:07:18 -07006033 .bus_ops = &ol_ops_bus,
6034 .ocb_ops = &ol_ops_ocb,
6035 .peer_ops = &ol_ops_peer,
6036 .throttle_ops = &ol_ops_throttle,
6037 .mob_stats_ops = &ol_ops_mob_stats,
6038 .delay_ops = &ol_ops_delay,
6039 .pmf_ops = &ol_ops_pmf
6040};
6041
Jeff Johnson02c37b42017-01-10 14:49:24 -08006042/*
6043 * Local prototype added to temporarily address warning caused by
6044 * -Wmissing-prototypes. A more correct solution, namely to expose
6045 * a prototype in an appropriate header file, will come later.
6046 */
6047struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
6048 struct ol_if_ops *dp_ol_if_ops);
6049struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
6050 struct ol_if_ops *dp_ol_if_ops)
Leo Chang98726762016-10-28 11:07:18 -07006051{
6052 struct cdp_soc_t *soc = qdf_mem_malloc(sizeof(struct cdp_soc_t));
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07006053
Leo Chang98726762016-10-28 11:07:18 -07006054 if (!soc) {
6055 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6056 "%s: OL SOC memory allocation failed\n", __func__);
6057 return NULL;
6058 }
6059
6060 soc->ops = &ol_txrx_ops;
6061 return soc;
6062}
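/*
 * Illustrative usage sketch (scn_handle and vdev are caller-provided;
 * NULL for dp_ol_if_ops is shown only because this legacy implementation
 * does not use it): after the soc is attached, upper layers reach the OL
 * data path through the cdp ops table wired up above, e.g.
 *
 *	struct cdp_soc_t *soc = ol_txrx_soc_attach(scn_handle, NULL);
 *
 *	if (soc)
 *		soc->ops->misc_ops->get_vdev_id(vdev);
 *
 * The soc is released with ol_txrx_soc_detach(), which frees the
 * allocation made here.
 */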
6063
6064