/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>		/* uint32_t, etc. */
#include <qdf_mem.h>		/* qdf_mem_malloc,free */
#include <qdf_types.h>		/* qdf_device_t, qdf_print */
#include <qdf_lock.h>		/* qdf_spinlock */
#include <qdf_atomic.h>		/* qdf_atomic_read */

#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
/* Required for WLAN_FEATURE_FASTPATH */
#include <ce_api.h>
#endif
/* header files for utilities */
#include <cds_queue.h>		/* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>		/* ol_cfg_is_high_latency */
#include <ol_if_athvar.h>

/* header files for HTT API */
#include <ol_htt_api.h>
#include <ol_htt_tx_api.h>

/* header files for our own APIs */
#include <ol_txrx_api.h>
#include <ol_txrx_dbg.h>
#include <cdp_txrx_ocb.h>
#include <ol_txrx_ctrl_api.h>
#include <cdp_txrx_stats.h>
#include <ol_txrx_osif_api.h>
/* header files for our internal definitions */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT, etc. */
#include <wdi_event.h>		/* WDI events */
#include <ol_tx.h>		/* ol_tx_ll */
#include <ol_rx.h>		/* ol_rx_deliver */
#include <ol_txrx_peer_find.h>	/* ol_txrx_peer_find_attach, etc. */
#include <ol_rx_pn.h>		/* ol_rx_pn_check, etc. */
#include <ol_rx_fwd.h>		/* ol_rx_fwd_check, etc. */
#include <ol_rx_reorder_timeout.h>	/* OL_RX_REORDER_TIMEOUT_INIT, etc. */
#include <ol_rx_reorder.h>
#include <ol_tx_send.h>		/* ol_tx_discard_target_frms */
#include <ol_tx_desc.h>		/* ol_tx_desc_frame_free */
#include <ol_tx_queue.h>
#include <ol_tx_sched.h>	/* ol_tx_sched_attach, etc. */
#include <ol_txrx.h>
#include <ol_txrx_types.h>
#include <cdp_txrx_flow_ctrl_legacy.h>
#include <cdp_txrx_bus.h>
#include <cdp_txrx_ipa.h>
#include <cdp_txrx_pmf.h>
#include "wma.h"
#include "hif.h"
#include "hif_main.h"
#include <cdp_txrx_peer_ops.h>
#ifndef REMOVE_PKT_LOG
#include "pktlog_ac.h"
#endif
#include <wlan_policy_mgr_api.h>
#include "epping_main.h"
#include <a_types.h>
#include <cdp_txrx_handle.h>
#include "wlan_qct_sys.h"

#include <htt_internal.h>
#include <ol_txrx_ipa.h>
#include "wlan_roam_debug.h"

#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *pdev,
			      uint8_t local_peer_id);
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
				 uint8_t local_peer_id,
				 enum peer_debug_id_type dbg_id);
#endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */
QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
				     uint8_t *peer_mac,
				     enum ol_txrx_peer_state state);
static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *vdev,
					bool val);
int ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
extern void
ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
		      struct ol_tx_wmm_param_t wmm_param);

extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
				uint64_t **last_pn, uint32_t **rmf_pn_replays);

/* thresh for peer's cached buf queue beyond which the elements are dropped */
#define OL_TXRX_CACHED_BUFQ_THRESH 128

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
 * @vdev: the data virtual device
 * @bss_addr: bss address
 *
 * Return: None
 */
static void
ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
	if (bss_addr && vdev->last_real_peer &&
	    !qdf_mem_cmp((u8 *)bss_addr,
			 vdev->last_real_peer->mac_addr.raw,
			 IEEE80211_ADDR_LEN))
		qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
			     vdev->last_real_peer->mac_addr.raw,
			     OL_TXRX_MAC_ADDR_LEN);
	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
}

/**
 * ol_txrx_add_last_real_peer() - add last peer
 * @pdev: the data physical device
 * @vdev: virtual device
 * @peer_id: peer id
 *
 * Return: None
 */
static void
ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
			   struct cdp_vdev *pvdev, uint8_t *peer_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	ol_txrx_peer_handle peer;

	peer = ol_txrx_find_peer_by_addr(
		(struct cdp_pdev *)pdev,
		vdev->hl_tdls_ap_mac_addr.raw,
		peer_id);

	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
	if (!vdev->last_real_peer && peer &&
	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
		vdev->last_real_peer = peer;
	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
}

/**
 * is_vdev_restore_last_peer() - check for vdev last peer
 * @peer: peer object
 *
 * Return: true if last peer is not null
 */
static bool
is_vdev_restore_last_peer(void *ppeer)
{
	struct ol_txrx_peer_t *peer = ppeer;
	struct ol_txrx_vdev_t *vdev;

	vdev = peer->vdev;
	return vdev->last_real_peer && (vdev->last_real_peer == peer);
}

/**
 * ol_txrx_update_last_real_peer() - restore the vdev's last real peer
 * @pdev: the data physical device
 * @peer: peer device
 * @peer_id: peer id
 * @restore_last_peer: restore last peer flag
 *
 * Return: None
 */
static void
ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *ppeer,
			      uint8_t *peer_id, bool restore_last_peer)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_peer_t *peer = ppeer;
	struct ol_txrx_vdev_t *vdev;

	if (!restore_last_peer)
		return;

	vdev = peer->vdev;
	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
					 vdev->hl_tdls_ap_mac_addr.raw,
					 peer_id);

	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
	if (!vdev->last_real_peer && peer &&
	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
		vdev->last_real_peer = peer;
	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
}
#endif

/**
 * ol_tx_mark_first_wakeup_packet() - set flag to indicate that
 *    fw is compatible for marking first packet after wow wakeup
 * @value: 1 for enabled/ 0 for disabled
 *
 * Return: None
 */
static void ol_tx_mark_first_wakeup_packet(uint8_t value)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err(
			"%s: pdev is NULL\n", __func__);
		return;
	}

	htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
}

u_int16_t
ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
{
	u_int16_t desc_pool_size;
	u_int16_t steady_state_tx_lifetime_ms;
	u_int16_t safety_factor;

	/*
	 * Steady-state tx latency:
	 *     roughly 1-2 ms flight time
	 *   + roughly 1-2 ms prep time,
	 *   + roughly 1-2 ms target->host notification time.
	 * = roughly 6 ms total
	 * Thus, steady state number of frames =
	 * steady state max throughput / frame size * tx latency, e.g.
	 * 1 Gbps / 1500 bytes * 6 ms = 500
	 *
	 */
	steady_state_tx_lifetime_ms = 6;

	safety_factor = 8;

	desc_pool_size =
		ol_cfg_max_thruput_mbps(ctrl_pdev) *
		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
		(8 * OL_TX_AVG_FRM_BYTES) *
		steady_state_tx_lifetime_ms *
		safety_factor;

	/* minimum */
	if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;

	/* maximum */
	if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;

	return desc_pool_size;
}

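/*
 * Illustrative sizing example for ol_tx_desc_pool_size_hl() (a sketch, not
 * part of the driver; the configured max throughput and the value of
 * OL_TX_AVG_FRM_BYTES are assumed here purely for the arithmetic):
 *
 *   max throughput          = 1000 Mbps (from ol_cfg_max_thruput_mbps())
 *   OL_TX_AVG_FRM_BYTES     = 1500 bytes (assumed)
 *   steady-state tx latency = 6 ms, safety factor = 8
 *
 *   desc_pool_size = 1000 * 1000 / (8 * 1500) * 6 * 8
 *                  = (1,000,000 / 12,000) * 48
 *                  = 83 * 48 = 3984 descriptors
 *
 * The result is then clamped to the
 * [OL_TX_DESC_POOL_SIZE_MIN_HL, OL_TX_DESC_POOL_SIZE_MAX_HL] range.
 */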
/*=== function definitions ===*/

/**
 * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate that mgmt over
 *                                        wmi is enabled or not.
 * @value: 1 for enabled/ 0 for disable
 *
 * Return: None
 */
void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->is_mgmt_over_wmi_enabled = value;
}

/**
 * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
 *
 * Return: is_mgmt_over_wmi_enabled
 */
uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return 0;
	}
	return pdev->is_mgmt_over_wmi_enabled;
}


#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
static void *
ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
	struct cdp_vdev *pvdev, uint8_t *peer_addr, uint8_t *peer_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	return peer;
}

static QDF_STATUS ol_txrx_get_vdevid(void *ppeer, uint8_t *vdev_id)
{
	struct ol_txrx_peer_t *peer = ppeer;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "peer argument is null!!");
		return QDF_STATUS_E_FAILURE;
	}

	*vdev_id = peer->vdev->vdev_id;
	return QDF_STATUS_SUCCESS;
}

static struct cdp_vdev *ol_txrx_get_vdev_by_sta_id(struct cdp_pdev *ppdev,
						   uint8_t sta_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_peer_t *peer = NULL;
	ol_txrx_vdev_handle vdev;

	if (sta_id >= WLAN_MAX_STA_COUNT) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Invalid sta id passed");
		return NULL;
	}

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PDEV not found for sta_id [%d]", sta_id);
		return NULL;
	}

	peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev, sta_id,
						PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PEER [%d] not found", sta_id);
		return NULL;
	}

	vdev = peer->vdev;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);

	return (struct cdp_vdev *)vdev;
}

/**
 * ol_txrx_find_peer_by_addr() - find peer via peer mac addr and peer_id
 * @ppdev: pointer of type cdp_pdev
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds a peer with the given mac address and returns its
 * peer_id. Note that this function does not increment the peer->ref_cnt.
 * This means that the peer may be deleted in some other parallel context
 * after it has been found.
 *
 * Return: peer handle if peer is found, NULL if peer is not found.
 */
void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
				uint8_t *peer_addr,
				uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
						   PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	return peer;
}

/**
 * ol_txrx_peer_get_ref_by_addr() - get peer ref via peer mac addr and peer_id
 * @pdev: pointer of type ol_txrx_pdev_handle
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds the peer with given mac address and returns its peer_id.
 * Note that this function increments the peer->ref_cnt.
 * This makes sure that peer will be valid. This also means the caller needs to
 * call the corresponding API - ol_txrx_peer_release_ref - to delete the peer
 * reference.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr, peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
						 u8 *peer_addr,
						 u8 *peer_id,
						 enum peer_debug_id_type dbg_id)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
						   dbg_id);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	return peer;
}

static uint16_t ol_txrx_local_peer_id(void *ppeer)
{
	ol_txrx_peer_handle peer = ppeer;

	return peer->local_id;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @return handle to the txrx peer object
 */
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *ppdev,
			      uint8_t local_peer_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_peer_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	return peer;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @dbg_id - debug_id to track caller
 * @return handle to the txrx peer object
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * Note that this function increments the peer->ref_cnt.
 * This makes sure that peer will be valid. This also means the caller needs
 * to call the corresponding API - ol_txrx_peer_release_ref - to release the
 * reference.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_local_id(pdev, local_peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
				 uint8_t local_peer_id,
				 enum peer_debug_id_type dbg_id)
{
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_peer_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	if (peer && peer->valid)
		ol_txrx_peer_get_ref(peer, dbg_id);
	else
		peer = NULL;
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);

	return peer;
}

static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}
	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
}

#else
#define ol_txrx_local_peer_id_pool_init(pdev)   /* no-op */
#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
#define ol_txrx_local_peer_id_free(pdev, peer)  /* no-op */
#define ol_txrx_local_peer_id_cleanup(pdev)     /* no-op */
#endif

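/*
 * Illustrative trace of the local peer ID freelist above (a sketch, not part
 * of the driver; a pool of 4 IDs is assumed purely for the walk-through):
 *
 *   after pool_init:  freelist = 0, pool = [1, 2, 3, 4, 4(end marker)]
 *   alloc peer A:     local_id = 0, freelist = 1, map[0] = A
 *   alloc peer B:     local_id = 1, freelist = 2, map[1] = B
 *   free  peer A:     pool[0] = 2, freelist = 0, map[0] = NULL
 *   alloc peer C:     local_id = 0 again (head of freelist), map[0] = C
 *
 * When the freelist reaches the ID that links to itself, the pool is
 * exhausted and ol_txrx_local_peer_id_alloc() hands out
 * OL_TXRX_INVALID_LOCAL_PEER_ID.
 */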
#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

/**
 * ol_txrx_update_group_credit() - update group credit for tx queue
 * @group: for which credit needs to be updated
 * @credit: credits
 * @absolute: TXQ group absolute
 *
 * Return: None
 */
void ol_txrx_update_group_credit(
		struct ol_tx_queue_group_t *group,
		int32_t credit,
		u_int8_t absolute)
{
	if (absolute)
		qdf_atomic_set(&group->credit, credit);
	else
		qdf_atomic_add(credit, &group->credit);
}

/**
 * ol_txrx_update_tx_queue_groups() - update vdev tx queue group if
 *				      vdev id mask and ac mask is not matching
 * @pdev: the data physical device
 * @group_id: TXQ group id
 * @credit: TXQ group credit count
 * @absolute: TXQ group absolute
 * @vdev_id_mask: TXQ vdev group id mask
 * @ac_mask: TXQ access category mask
 *
 * Return: None
 */
void ol_txrx_update_tx_queue_groups(
		ol_txrx_pdev_handle pdev,
		u_int8_t group_id,
		int32_t credit,
		u_int8_t absolute,
		u_int32_t vdev_id_mask,
		u_int32_t ac_mask
		)
{
	struct ol_tx_queue_group_t *group;
	u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
	u_int32_t membership;
	struct ol_txrx_vdev_t *vdev;

	if (group_id >= OL_TX_MAX_TXQ_GROUPS) {
		ol_txrx_warn("%s: invalid group_id=%u, ignore update.\n",
			     __func__,
			     group_id);
		return;
	}

	group = &pdev->txq_grps[group_id];

	membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	/*
	 * if the membership (vdev id mask and ac mask)
	 * matches then no need to update tx queue groups.
	 */
	if (group->membership == membership)
		/* Update Credit Only */
		goto credit_update;


	/*
	 * membership (vdev id mask and ac mask) is not matching
	 * TODO: ignoring ac mask for now
	 */
	group_vdev_id_mask =
		OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		group_vdev_bit_mask =
			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
					group_vdev_id_mask, vdev->vdev_id);
		vdev_bit_mask =
			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
					vdev_id_mask, vdev->vdev_id);

		if (group_vdev_bit_mask != vdev_bit_mask) {
			/*
			 * Change in vdev tx queue group
			 */
			if (!vdev_bit_mask) {
				/* Set Group Pointer (vdev and peer) to NULL */
				ol_tx_set_vdev_group_ptr(
						pdev, vdev->vdev_id, NULL);
			} else {
				/* Set Group Pointer (vdev and peer) */
				ol_tx_set_vdev_group_ptr(
						pdev, vdev->vdev_id, group);
			}
		}
	}
	/* Update membership */
	group->membership = membership;
credit_update:
	/* Update Credit */
	ol_txrx_update_group_credit(group, credit, absolute);
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
}
#endif

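/*
 * Illustrative walk-through of ol_txrx_update_tx_queue_groups() (a sketch,
 * not part of the driver; the mask values are assumed only for the example):
 *
 *   current group membership encodes vdev_id_mask = 0x3 (vdevs 0 and 1)
 *   a new update carries     vdev_id_mask = 0x1 (vdev 0 only), credit = 10,
 *   absolute = 0
 *
 *   - the membership differs, so the vdev list is walked: vdev 1's bit
 *     changed from 1 to 0, so its group pointer is cleared; vdev 0's bit is
 *     unchanged, so it is left alone.
 *   - group->membership is rewritten from the new masks.
 *   - ol_txrx_update_group_credit() then adds 10 credits (absolute = 1 would
 *     instead overwrite the credit counter with 10).
 */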
#ifdef WLAN_FEATURE_FASTPATH
/**
 * setup_fastpath_ce_handles() - update pdev with ce_handle for fastpath use.
 *
 * @osc: pointer to HIF context
 * @pdev: pointer to ol pdev
 *
 * Return: void
 */
static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
					     struct ol_txrx_pdev_t *pdev)
{
	/*
	 * Before the HTT attach, set up the CE handles
	 * CE handles are (struct CE_state *)
	 * This is only required in the fast path
	 */
	pdev->ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_H2T_MSG);

}

#else  /* not WLAN_FEATURE_FASTPATH */
static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
					     struct ol_txrx_pdev_t *pdev)
{
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_set_desc_global_pool_size() - set global pool size
 * @num_msdu_desc: total number of descriptors
 *
 * Return: none
 */
static void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->num_msdu_desc = num_msdu_desc;
	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
		pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
	ol_txrx_info_high("Global pool size: %d\n",
			  pdev->num_msdu_desc);
}

/**
 * ol_tx_get_desc_global_pool_size() - get global pool size
 * @pdev: pdev handle
 *
 * Return: global pool size
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return pdev->num_msdu_desc;
}

/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
static inline
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
	struct ol_tx_flow_pool_t *pool = NULL;
	uint32_t free_desc;

	free_desc = pdev->tx_desc.num_free;
	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		free_desc += pool->avail_desc;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	}
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return free_desc;
}

#else
/**
 * ol_tx_get_desc_global_pool_size() - get global pool size
 * @pdev: pdev handle
 *
 * Return: global pool size
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
}

/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
static inline
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
	return pdev->tx_desc.num_free;
}

#endif

#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)

/**
 * ol_txrx_rsrc_threshold_lo() - set threshold low - when to start tx desc
 *				 margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold low
 */
static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;

	/*
	 * 5% margin of unallocated desc is too much for per
	 * vdev mechanism.
	 * Define the value separately.
	 */
	threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;

	return threshold_low;
}

/**
 * ol_txrx_rsrc_threshold_hi() - set threshold high - where to stop
 *				 during tx desc margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold high
 */
static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/* when freeing up descriptors,
	 * keep going until there's a 7.5% margin
	 */
	threshold_high = ((15 * desc_pool_size)/100)/2;

	return threshold_high;
}
#else

static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;
	/* always maintain a 5% margin of unallocated descriptors */
	threshold_low = (5 * desc_pool_size)/100;

	return threshold_low;
}

static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/* when freeing up descriptors, keep going until
	 * there's a 15% margin
	 */
	threshold_high = (15 * desc_pool_size)/100;

	return threshold_high;
}
#endif

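/*
 * Worked example for the default (non-per-vdev) thresholds above (a sketch,
 * not part of the driver; desc_pool_size = 1000 is assumed for the numbers):
 *
 *   threshold_lo = (5  * 1000) / 100 = 50  descriptors
 *   threshold_hi = (15 * 1000) / 100 = 150 descriptors
 *
 * i.e. margin replenishment starts once fewer than 50 descriptors remain
 * unallocated and keeps freeing until the margin is back up to 150. With
 * CONFIG_PER_VDEV_TX_DESC_POOL, the low mark is instead the fixed
 * TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED value and the high mark is halved
 * (7.5%, i.e. 75 for the same pool size).
 */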
#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
 * @pdev: the physical device object
 *
 * Return: None
 */
static void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->txq_log_spinlock);
	pdev->txq_log.size = OL_TXQ_LOG_SIZE;
	pdev->txq_log.oldest_record_offset = 0;
	pdev->txq_log.offset = 0;
	pdev->txq_log.allow_wrap = 1;
	pdev->txq_log.wrapped = 0;
}

/**
 * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->txq_log_spinlock);
}

#else

static inline void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
}

static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
}


#endif

#if defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->grp_stat_spinlock);
	pdev->grp_stats.last_valid_index = -1;
	pdev->grp_stats.wrap_around = 0;
}

/**
 * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
}
#else

static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
}

static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
}
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
 * @vdev: the virtual device object
 * @flag: flag
 *
 * Return: None
 */
void
ol_txrx_hl_tdls_flag_reset(struct cdp_vdev *pvdev, bool flag)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	vdev->hlTdlsFlag = flag;
}
#endif

#if defined(CONFIG_HL_SUPPORT)

/**
 * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
	u_int8_t i;

	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		TAILQ_INIT(&vdev->txqs[i].head);
		vdev->txqs[i].paused_count.total = 0;
		vdev->txqs[i].frms = 0;
		vdev->txqs[i].bytes = 0;
		vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
		vdev->txqs[i].flag = ol_tx_queue_empty;
		/* aggregation is not applicable for vdev tx queues */
		vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
		ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
		ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
	}
}

/**
 * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_frms_queue_t *txq;
	int i;

	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		txq = &vdev->txqs[i];
		ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
	}
}

/**
 * ol_txrx_peer_txqs_init() - initialise peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
static void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
		       struct ol_txrx_peer_t *peer)
{
	uint8_t i;
	struct ol_txrx_vdev_t *vdev = peer->vdev;

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		TAILQ_INIT(&peer->txqs[i].head);
		peer->txqs[i].paused_count.total = 0;
		peer->txqs[i].frms = 0;
		peer->txqs[i].bytes = 0;
		peer->txqs[i].ext_tid = i;
		peer->txqs[i].flag = ol_tx_queue_empty;
		peer->txqs[i].aggr_state = ol_tx_aggr_untried;
		ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
		ol_txrx_set_txq_peer(&peer->txqs[i], peer);
	}
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);

	/* aggregation is not applicable for mgmt and non-QoS tx queues */
	for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
		peer->txqs[i].aggr_state = ol_tx_aggr_disabled;

	ol_txrx_peer_pause(peer);
}

/**
 * ol_txrx_peer_tx_queue_free() - free peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
static void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	struct ol_tx_frms_queue_t *txq;
	uint8_t i;

	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		txq = &peer->txqs[i];
		ol_tx_queue_free(pdev, txq, i, true);
	}
}
#else

static inline void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
}

static inline void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
}

static inline void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
		       struct ol_txrx_peer_t *peer)
{
}

static inline void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
}
#endif

#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
{
	qdf_spinlock_create(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
{
	qdf_spinlock_destroy(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
	int msdu_idx;
	int seg_idx;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "TSO Statistics:");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "TSO pkts %lld, bytes %lld\n",
		  pdev->stats.pub.tx.tso.tso_pkts.pkts,
		  pdev->stats.pub.tx.tso.tso_pkts.bytes);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "TSO Histogram for numbers of segments:\n"
		  "Single segment %d\n"
		  "  2-5 segments %d\n"
		  " 6-10 segments %d\n"
		  "11-15 segments %d\n"
		  "16-20 segments %d\n"
		  "  20+ segments %d\n",
		  pdev->stats.pub.tx.tso.tso_hist.pkts_1,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "TSO History Buffer: Total size %d, current_index %d",
		  NUM_MAX_TSO_MSDUS,
		  TXRX_STATS_TSO_MSDU_IDX(pdev));

	for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
		if (TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx) == 0)
			continue;
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
			  "jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
			  msdu_idx,
			  TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
			  TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
			  TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
			  TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));

		for (seg_idx = 0;
		     ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev,
		       msdu_idx)) && (seg_idx < NUM_MAX_TSO_SEGS));
		     seg_idx++) {
			struct qdf_tso_seg_t tso_seg =
				TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "seg idx: %d", seg_idx);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "tso_enable: %d",
				  tso_seg.tso_flags.tso_enable);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
				  tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
				  tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
				  tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
				  tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
				  tso_seg.tso_flags.ns);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "tcp_seq_num: 0x%x ip_id: %d",
				  tso_seg.tso_flags.tcp_seq_num,
				  tso_seg.tso_flags.ip_id);
		}
	}
}

static void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
{
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_pkts,
		     sizeof(struct ol_txrx_stats_elem));
#if defined(FEATURE_TSO)
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_info,
		     sizeof(struct ol_txrx_stats_tso_info));
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_hist,
		     sizeof(struct ol_txrx_tso_histogram));
#endif
}

#else

static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "TSO is not supported\n");
}

static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
{
	/*
	 * keeping the body empty and not adding an error print, as the print
	 * would show up every time during driver load if TSO is not enabled.
	 */
}

static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
{
	/*
	 * keeping the body empty and not adding an error print, as the print
	 * would show up every time during driver unload if TSO is not enabled.
	 */
}

static void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
{
	/*
	 * keeping the body empty and not adding an error print, as the print
	 * would show up every time during driver unload if TSO is not enabled.
	 */
}
#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */

/**
 * ol_txrx_pdev_attach() - allocate txrx pdev
 * @ctrl_pdev: cfg pdev
 * @htc_pdev: HTC pdev
 * @osdev: os dev
 *
 * Return: txrx pdev handle
 *	   NULL for failure
 */
static struct cdp_pdev *
ol_txrx_pdev_attach(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
		    HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
{
	struct ol_txrx_pdev_t *pdev;
	int i, tid;

	pdev = qdf_mem_malloc(sizeof(*pdev));
	if (!pdev)
		goto fail0;

	/* init LL/HL cfg here */
	pdev->cfg.is_high_latency = ol_cfg_is_high_latency(ctrl_pdev);
	pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(ctrl_pdev);

	/* store provided params */
	pdev->ctrl_pdev = ctrl_pdev;
	pdev->osdev = osdev;

	for (i = 0; i < htt_num_sec_types; i++)
		pdev->sec_types[i] = (enum ol_sec_type)i;

	TXRX_STATS_INIT(pdev);
	ol_txrx_tso_stats_init(pdev);

	TAILQ_INIT(&pdev->vdev_list);

	TAILQ_INIT(&pdev->req_list);
	pdev->req_list_depth = 0;
	qdf_spinlock_create(&pdev->req_list_spinlock);

	/* do initial set up of the peer ID -> peer object lookup map */
	if (ol_txrx_peer_find_attach(pdev))
		goto fail1;

	/* initialize the counter of the target's tx buffer availability */
	qdf_atomic_init(&pdev->target_tx_credit);
	qdf_atomic_init(&pdev->orig_target_tx_credit);

	if (ol_cfg_is_high_latency(ctrl_pdev)) {
		qdf_spinlock_create(&pdev->tx_queue_spinlock);
		pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
		if (pdev->tx_sched.scheduler == NULL)
			goto fail2;
	}
	ol_txrx_pdev_txq_log_init(pdev);
	ol_txrx_pdev_grp_stats_init(pdev);

	pdev->htt_pdev =
		htt_pdev_alloc(pdev, ctrl_pdev, htc_pdev, osdev);
	if (!pdev->htt_pdev)
		goto fail3;

	htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
					  ol_rx_pkt_dump_call);

	/*
	 * Init the tid --> category table.
	 * Regular tids (0-15) map to their AC.
	 * Extension tids get their own categories.
	 */
	for (tid = 0; tid < OL_TX_NUM_QOS_TIDS; tid++) {
		int ac = TXRX_TID_TO_WMM_AC(tid);

		pdev->tid_to_ac[tid] = ac;
	}
	pdev->tid_to_ac[OL_TX_NON_QOS_TID] =
		OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;
	pdev->tid_to_ac[OL_TX_MGMT_TID] =
		OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
	pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST] =
		OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
	pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
		OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;

	return (struct cdp_pdev *)pdev;

fail3:
	ol_txrx_peer_find_detach(pdev);

fail2:
	if (ol_cfg_is_high_latency(ctrl_pdev))
		qdf_spinlock_destroy(&pdev->tx_queue_spinlock);

fail1:
	ol_txrx_tso_stats_deinit(pdev);
	qdf_mem_free(pdev);

fail0:
	return NULL;
}

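/*
 * Illustrative attach sequence (a sketch, not part of the driver; the caller
 * side is simplified and error handling is omitted):
 *
 *   pdev = ol_txrx_pdev_attach(soc, ctrl_pdev, htc_pdev, osdev, 0);
 *   ...                  // allocates the pdev, HTT pdev and tid->AC table
 *   ol_txrx_pdev_post_attach(pdev);
 *   ...                  // sizes and fills the tx descriptor pool
 *
 * On any failure ol_txrx_pdev_attach() unwinds through the fail3/fail2/fail1
 * labels above, so partially initialized state is released before returning
 * NULL.
 */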
#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
/**
 * htt_pkt_log_init() - API to initialize packet log
 * @handle: pdev handle
 * @scn: HIF context
 *
 * Return: void
 */
void htt_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
	struct ol_txrx_pdev_t *handle = (struct ol_txrx_pdev_t *)ppdev;

	if (handle->pkt_log_init)
		return;

	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
	    !QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
		pktlog_sethandle(&handle->pl_dev, scn);
		pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
		if (pktlogmod_init(scn))
			qdf_print("%s: pktlogmod_init failed", __func__);
		else
			handle->pkt_log_init = true;
	}
}

/**
 * htt_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 * @scn: HIF Context
 *
 * Return: void
 */
static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
{
	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
	    !QDF_IS_EPPING_ENABLED(cds_get_conparam()) &&
	    handle->pkt_log_init) {
		pktlogmod_exit(handle);
		handle->pkt_log_init = false;
	}
}

#else
void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *ol_sc) { }
static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
#endif

Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001408/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08001409 * ol_txrx_pdev_post_attach() - complete the txrx pdev attach
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001410 * @pdev: txrx pdev
1411 *
1412 * Return: 0 for success
1413 */
1414int
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001415ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001416{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001417 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001418 uint16_t i;
1419 uint16_t fail_idx = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001420 int ret = 0;
1421 uint16_t desc_pool_size;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301422 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001423
Leo Chang376398b2015-10-23 14:19:02 -07001424 uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
1425 union ol_tx_desc_list_elem_t *c_element;
1426 unsigned int sig_bit;
1427 uint16_t desc_per_page;
1428
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001429 if (!osc) {
1430 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001431 goto ol_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001432 }
1433
1434 /*
1435 * For LL, limit the number of host's tx descriptors to match
1436 * the number of target FW tx descriptors.
1437 * This simplifies the FW, by ensuring the host will never
1438 * download more tx descriptors than the target has space for.
1439 * The FW will drop/free low-priority tx descriptors when it
1440 * starts to run low, so that in theory the host should never
1441 * run out of tx descriptors.
1442 */
1443
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001444 /*
1445 * LL - initialize the target credit ourselves.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301446 * HL - wait for an HTT target credit initialization
1447 * during htt_attach.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001448 */
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301449 if (pdev->cfg.is_high_latency) {
1450 desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001451
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301452 qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
1453 qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001454
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301455 pdev->tx_queue.rsrc_threshold_lo =
1456 ol_txrx_rsrc_threshold_lo(desc_pool_size);
1457 pdev->tx_queue.rsrc_threshold_hi =
1458 ol_txrx_rsrc_threshold_hi(desc_pool_size);
1459
1460 for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++)
1461 qdf_atomic_init(&pdev->txq_grps[i].credit);
1462
1463 ol_tx_target_credit_init(pdev, desc_pool_size);
1464 } else {
1465 qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
1466 &pdev->target_tx_credit);
1467 desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
1468 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001469
Nirav Shah76291962016-04-25 10:50:37 +05301470 ol_tx_desc_dup_detect_init(pdev, desc_pool_size);
1471
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001472 setup_fastpath_ce_handles(osc, pdev);
1473
1474 ret = htt_attach(pdev->htt_pdev, desc_pool_size);
1475 if (ret)
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301476 goto htt_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001477
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001478 /* Attach micro controller data path offload resource */
Yun Parkf01f6e22017-01-18 17:27:02 -08001479 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
1480 ret = htt_ipa_uc_attach(pdev->htt_pdev);
1481 if (ret)
Leo Chang376398b2015-10-23 14:19:02 -07001482 goto uc_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001483 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001484
Leo Chang376398b2015-10-23 14:19:02 -07001485 /* Round the single-element reserved size up to a power of 2 */
Anurag Chouhanc5548422016-02-24 18:33:27 +05301486 pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301487 qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
Leo Chang376398b2015-10-23 14:19:02 -07001488 pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
1489 if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
1490 (NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301491 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001492 "Page alloc fail");
Yun Parkf01f6e22017-01-18 17:27:02 -08001493 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001494 goto page_alloc_fail;
1495 }
1496 desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
1497 pdev->tx_desc.offset_filter = desc_per_page - 1;
1498 /* Calculate page divider to find page number */
1499 sig_bit = 0;
1500 while (desc_per_page) {
1501 sig_bit++;
1502 desc_per_page = desc_per_page >> 1;
1503 }
1504 pdev->tx_desc.page_divider = (sig_bit - 1);
Srinivas Girigowdab8ecec22017-03-09 15:02:59 -08001505 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Leo Chang376398b2015-10-23 14:19:02 -07001506 "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
1507 pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
1508 desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
1509 pdev->tx_desc.desc_pages.num_element_per_page);
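	/*
	 * Worked example (assumed page packing, for illustration only): if
	 * the multi-page allocator packs 64 descriptors per page, the loop
	 * above runs 7 times, giving page_divider = 6 and offset_filter = 63,
	 * so a descriptor id can be split without a division:
	 *   page   = id >> pdev->tx_desc.page_divider;   (id / 64)
	 *   offset = id &  pdev->tx_desc.offset_filter;   (id % 64)
	 * which is the kind of lookup ol_tx_desc_find() relies on.
	 */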
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001510
1511 /*
1512 * Each SW tx desc (used only within the tx datapath SW) has a
1513 * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
1514 * Go ahead and allocate the HTT tx desc and link it with the SW tx
1515 * desc now, to avoid doing it during time-critical transmit.
1516 */
1517 pdev->tx_desc.pool_size = desc_pool_size;
Leo Chang376398b2015-10-23 14:19:02 -07001518 pdev->tx_desc.freelist =
1519 (union ol_tx_desc_list_elem_t *)
1520 (*pdev->tx_desc.desc_pages.cacheable_pages);
1521 c_element = pdev->tx_desc.freelist;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001522 for (i = 0; i < desc_pool_size; i++) {
1523 void *htt_tx_desc;
Leo Chang376398b2015-10-23 14:19:02 -07001524 void *htt_frag_desc = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301525 qdf_dma_addr_t frag_paddr = 0;
1526 qdf_dma_addr_t paddr;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001527
Leo Chang376398b2015-10-23 14:19:02 -07001528 if (i == (desc_pool_size - 1))
1529 c_element->next = NULL;
1530 else
1531 c_element->next = (union ol_tx_desc_list_elem_t *)
1532 ol_tx_desc_find(pdev, i + 1);
1533
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001534 htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001535 if (!htt_tx_desc) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301536 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001537 "%s: failed to alloc HTT tx desc (%d of %d)",
1538 __func__, i, desc_pool_size);
Leo Chang376398b2015-10-23 14:19:02 -07001539 fail_idx = i;
Yun Parkf01f6e22017-01-18 17:27:02 -08001540 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001541 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001542 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001543
Leo Chang376398b2015-10-23 14:19:02 -07001544 c_element->tx_desc.htt_tx_desc = htt_tx_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001545 c_element->tx_desc.htt_tx_desc_paddr = paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001546 ret = htt_tx_frag_alloc(pdev->htt_pdev,
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001547 i, &frag_paddr, &htt_frag_desc);
Leo Chang376398b2015-10-23 14:19:02 -07001548 if (ret) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301549 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001550 "%s: failed to alloc HTT frag desc (%d/%d)",
1551 __func__, i, desc_pool_size);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001552 /* Is there a leak here, is this handling correct? */
Leo Chang376398b2015-10-23 14:19:02 -07001553 fail_idx = i;
1554 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001555 }
Leo Chang376398b2015-10-23 14:19:02 -07001556 if (!ret && htt_frag_desc) {
Yun Parkeaea8632017-04-09 09:53:45 -07001557 /*
1558 * Initialize the first 6 words (TSO flags)
1559 * of the frag descriptor
1560 */
Leo Chang376398b2015-10-23 14:19:02 -07001561 memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
1562 c_element->tx_desc.htt_frag_desc = htt_frag_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001563 c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001564 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001565#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
Leo Chang376398b2015-10-23 14:19:02 -07001566 c_element->tx_desc.pkt_type = 0xff;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001567#ifdef QCA_COMPUTE_TX_DELAY
Leo Chang376398b2015-10-23 14:19:02 -07001568 c_element->tx_desc.entry_timestamp_ticks =
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001569 0xffffffff;
1570#endif
1571#endif
Leo Chang376398b2015-10-23 14:19:02 -07001572 c_element->tx_desc.id = i;
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301573 qdf_atomic_init(&c_element->tx_desc.ref_cnt);
Leo Chang376398b2015-10-23 14:19:02 -07001574 c_element = c_element->next;
1575 fail_idx = i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001576 }
1577
1578 /* link SW tx descs into a freelist */
1579 pdev->tx_desc.num_free = desc_pool_size;
Poddar, Siddarth14521792017-03-14 21:19:42 +05301580 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001581 "%s first tx_desc:0x%pK Last tx desc:0x%pK\n", __func__,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001582 (uint32_t *) pdev->tx_desc.freelist,
1583 (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001584
1585 /* check what format of frames are expected to be delivered by the OS */
1586 pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
1587 if (pdev->frame_format == wlan_frm_fmt_native_wifi)
1588 pdev->htt_pkt_type = htt_pkt_type_native_wifi;
1589 else if (pdev->frame_format == wlan_frm_fmt_802_3) {
1590 if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
1591 pdev->htt_pkt_type = htt_pkt_type_eth2;
1592 else
1593 pdev->htt_pkt_type = htt_pkt_type_ethernet;
1594 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301595 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001596 "%s Invalid standard frame type: %d",
1597 __func__, pdev->frame_format);
Yun Parkf01f6e22017-01-18 17:27:02 -08001598 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001599 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001600 }
1601
1602 /* setup the global rx defrag waitlist */
1603 TAILQ_INIT(&pdev->rx.defrag.waitlist);
1604
1605 /* configure where defrag timeout and duplicate detection is handled */
1606 pdev->rx.flags.defrag_timeout_check =
1607 pdev->rx.flags.dup_check =
1608 ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);
1609
1610#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
1611 /* Need to revisit this part. Currently hardcoded to Riva's caps. */
1612 pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
1613 pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
1614 /*
1615 * The Riva HW de-aggregation doesn't have the capability to generate an
1616 * 802.11 header for non-first subframes of an A-MSDU.
1617 */
1618 pdev->sw_subfrm_hdr_recovery_enable = 1;
1619 /*
1620 * The Riva HW doesn't have the capability to set the Protected Frame bit
1621 * in the MAC header for encrypted data frames.
1622 */
1623 pdev->sw_pf_proc_enable = 1;
1624
1625 if (pdev->frame_format == wlan_frm_fmt_802_3) {
Yun Parkeaea8632017-04-09 09:53:45 -07001626 /*
1627 * SW LLC processing is only needed in the
1628 * 802.3 to 802.11 transform case
1629 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001630 pdev->sw_tx_llc_proc_enable = 1;
1631 pdev->sw_rx_llc_proc_enable = 1;
1632 } else {
1633 pdev->sw_tx_llc_proc_enable = 0;
1634 pdev->sw_rx_llc_proc_enable = 0;
1635 }
1636
1637 switch (pdev->frame_format) {
1638 case wlan_frm_fmt_raw:
1639 pdev->sw_tx_encap =
1640 pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
1641 ? 0 : 1;
1642 pdev->sw_rx_decap =
1643 pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
1644 ? 0 : 1;
1645 break;
1646 case wlan_frm_fmt_native_wifi:
1647 pdev->sw_tx_encap =
1648 pdev->
1649 target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
1650 ? 0 : 1;
1651 pdev->sw_rx_decap =
1652 pdev->
1653 target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
1654 ? 0 : 1;
1655 break;
1656 case wlan_frm_fmt_802_3:
1657 pdev->sw_tx_encap =
1658 pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
1659 ? 0 : 1;
1660 pdev->sw_rx_decap =
1661 pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
1662 ? 0 : 1;
1663 break;
1664 default:
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301665 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001666 "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
1667 pdev->frame_format,
1668 pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
Yun Parkf01f6e22017-01-18 17:27:02 -08001669 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001670 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001671 }
1672#endif
1673
1674 /*
1675 * Determine what rx processing steps are done within the host.
1676 * Possibilities:
1677 * 1. Nothing - rx->tx forwarding and rx PN entirely within target.
1678 * (This is unlikely; even if the target is doing rx->tx forwarding,
1679 * the host should be doing rx->tx forwarding too, as a back up for
1680 * the target's rx->tx forwarding, in case the target runs short on
1681 * memory, and can't store rx->tx frames that are waiting for
1682 * missing prior rx frames to arrive.)
1683 * 2. Just rx -> tx forwarding.
1684 * This is the typical configuration for HL, and a likely
1685 * configuration for LL STA or small APs (e.g. retail APs).
1686 * 3. Both PN check and rx -> tx forwarding.
1687 * This is the typical configuration for large LL APs.
1688 * Host-side PN check without rx->tx forwarding is not a valid
1689 * configuration, since the PN check needs to be done prior to
1690 * the rx->tx forwarding.
1691 */
1692 if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
Yun Parkeaea8632017-04-09 09:53:45 -07001693 /*
1694 * PN check, rx->tx forwarding and rx reorder are done by
1695 * the target
1696 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001697 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
1698 pdev->rx_opt_proc = ol_rx_in_order_deliver;
1699 else
1700 pdev->rx_opt_proc = ol_rx_fwd_check;
1701 } else {
1702 if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
1703 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
1704 /*
1705 * PN check done on host,
1706 * rx->tx forwarding not done at all.
1707 */
1708 pdev->rx_opt_proc = ol_rx_pn_check_only;
1709 } else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1710 /*
1711 * Both PN check and rx->tx forwarding done
1712 * on host.
1713 */
1714 pdev->rx_opt_proc = ol_rx_pn_check;
1715 } else {
1716#define TRACESTR01 "invalid config: if rx PN check is on the host, "\
1717"rx->tx forwarding check needs to also be on the host"
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301718 QDF_TRACE(QDF_MODULE_ID_TXRX,
1719 QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001720 "%s: %s", __func__, TRACESTR01);
1721#undef TRACESTR01
Yun Parkf01f6e22017-01-18 17:27:02 -08001722 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001723 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001724 }
1725 } else {
1726 /* PN check done on target */
1727 if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
1728 ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1729 /*
1730 * rx->tx forwarding done on host (possibly as
1731 * back-up for target-side primary rx->tx
1732 * forwarding)
1733 */
1734 pdev->rx_opt_proc = ol_rx_fwd_check;
1735 } else {
Yun Parkeaea8632017-04-09 09:53:45 -07001736 /*
1737 * rx->tx forwarding either done in target,
1738 * or not done at all
1739 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001740 pdev->rx_opt_proc = ol_rx_deliver;
1741 }
1742 }
1743 }
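	/*
	 * Summary of the handler selection above:
	 *   full reorder offload, fwd disabled    -> ol_rx_in_order_deliver
	 *   full reorder offload, fwd enabled     -> ol_rx_fwd_check
	 *   host PN check, fwd disabled           -> ol_rx_pn_check_only
	 *   host PN check, host fwd check         -> ol_rx_pn_check
	 *   target PN check, host fwd check       -> ol_rx_fwd_check
	 *   target PN check, no host fwd check    -> ol_rx_deliver
	 */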
1744
1745 /* initialize mutexes for tx desc alloc and peer lookup */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301746 qdf_spinlock_create(&pdev->tx_mutex);
1747 qdf_spinlock_create(&pdev->peer_ref_mutex);
1748 qdf_spinlock_create(&pdev->rx.mutex);
1749 qdf_spinlock_create(&pdev->last_real_peer_mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001750 qdf_spinlock_create(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001751 OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
1752
Yun Parkf01f6e22017-01-18 17:27:02 -08001753 if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK) {
1754 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001755 goto reorder_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001756 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001757
Yun Parkf01f6e22017-01-18 17:27:02 -08001758 if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK) {
1759 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001760 goto pn_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001761 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001762
1763#ifdef PERE_IP_HDR_ALIGNMENT_WAR
1764 pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
1765#endif
1766
1767 /*
1768 * WDI event attach
1769 */
1770 wdi_event_attach(pdev);
1771
1772 /*
1773 * Initialize rx PN check characteristics for different security types.
1774 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301775 qdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001776
1777 /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
1778 pdev->rx_pn[htt_sec_type_tkip].len =
1779 pdev->rx_pn[htt_sec_type_tkip_nomic].len =
1780 pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
1781 pdev->rx_pn[htt_sec_type_tkip].cmp =
1782 pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
1783 pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
1784
1785 /* WAPI: 128-bit PN */
1786 pdev->rx_pn[htt_sec_type_wapi].len = 128;
1787 pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
1788
1789 OL_RX_REORDER_TIMEOUT_INIT(pdev);
1790
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001791 ol_txrx_dbg("Created pdev %pK\n", pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001792
1793 pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
1794
1795#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
1796#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
1797
1798/* #if 1 -- TODO: clean this up */
1799#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
1800 /* avg = 100% * new + 0% * old */ \
1801 (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
1802/*
Yun Parkeaea8632017-04-09 09:53:45 -07001803 * #else
1804 * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
1805 * //avg = 25% * new + 75% * old
1806 * (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
1807 * #endif
1808 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001809 pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
1810 pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
1811#endif
1812
1813 ol_txrx_local_peer_id_pool_init(pdev);
1814
1815 pdev->cfg.ll_pause_txq_limit =
1816 ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
1817
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301818 /* TX flow control for peer who is in very bad link status */
1819 ol_tx_badpeer_flow_cl_init(pdev);
1820
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001821#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301822 qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301823 qdf_spinlock_create(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001824
1825 /* initialize compute interval with 5 seconds (ESE default) */
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301826 pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001827 {
1828 uint32_t bin_width_1000ticks;
Yun Parkeaea8632017-04-09 09:53:45 -07001829
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001830 bin_width_1000ticks =
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301831 qdf_system_msecs_to_ticks
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001832 (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
1833 * 1000);
1834 /*
1835 * Compute a factor and shift that together are equal to the
1836 * inverse of the bin_width time, so that rather than dividing
1837 * by the bin width time, approximately the same result can be
1838 * obtained much more efficiently by a multiply + shift.
1839 * multiply_factor >> shift = 1 / bin_width_time, so
1840 * multiply_factor = (1 << shift) / bin_width_time.
1841 *
1842 * Pick the shift semi-arbitrarily.
1843 * If we knew statically what the bin_width would be, we could
1844 * choose a shift that minimizes the error.
1845 * Since the bin_width is determined dynamically, simply use a
1846 * shift that is about half of the uint32_t size. This should
1847 * result in a relatively large multiplier value, which
1848 * minimizes error from rounding the multiplier to an integer.
1849 * The rounding error only becomes significant if the tick units
1850 * are on the order of 1 microsecond. In most systems, it is
1851 * expected that the tick units will be relatively low-res,
1852 * on the order of 1 millisecond. In such systems the rounding
1853 * error is negligible.
1854 * It would be more accurate to dynamically try out different
1855 * shifts and choose the one that results in the smallest
1856 * rounding error, but that extra level of fidelity is
1857 * not needed.
1858 */
1859 pdev->tx_delay.hist_internal_bin_width_shift = 16;
1860 pdev->tx_delay.hist_internal_bin_width_mult =
1861 ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
1862 1000 + (bin_width_1000ticks >> 1)) /
1863 bin_width_1000ticks;
1864 }
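	/*
	 * Worked example (assumes 1 ms ticks and a 10 ms internal bin width;
	 * both values are illustrative, not taken from this file):
	 *   bin_width_1000ticks = 10 * 1000 = 10000
	 *   mult = ((1 << 16) * 1000 + 5000) / 10000 = 6554
	 * so a 25-tick (25 ms) delay maps to bin (25 * 6554) >> 16 = 2,
	 * i.e. roughly 25 ms / 10 ms, with no division on the data path.
	 */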
1865#endif /* QCA_COMPUTE_TX_DELAY */
1866
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001867 /* Thermal Mitigation */
1868 ol_tx_throttle_init(pdev);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001869
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001870 ol_tso_seg_list_init(pdev, desc_pool_size);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001871
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301872 ol_tso_num_seg_list_init(pdev, desc_pool_size);
1873
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001874 ol_tx_register_flow_control(pdev);
1875
1876 return 0; /* success */
1877
Leo Chang376398b2015-10-23 14:19:02 -07001878pn_trace_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001879 OL_RX_REORDER_TRACE_DETACH(pdev);
1880
Leo Chang376398b2015-10-23 14:19:02 -07001881reorder_trace_attach_fail:
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301882 qdf_spinlock_destroy(&pdev->tx_mutex);
1883 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1884 qdf_spinlock_destroy(&pdev->rx.mutex);
1885 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301886 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001887 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1888
Leo Chang376398b2015-10-23 14:19:02 -07001889control_init_fail:
1890desc_alloc_fail:
1891 for (i = 0; i < fail_idx; i++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001892 htt_tx_desc_free(pdev->htt_pdev,
Leo Chang376398b2015-10-23 14:19:02 -07001893 (ol_tx_desc_find(pdev, i))->htt_tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001894
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301895 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001896 &pdev->tx_desc.desc_pages, 0, true);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001897
Leo Chang376398b2015-10-23 14:19:02 -07001898page_alloc_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001899 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1900 htt_ipa_uc_detach(pdev->htt_pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001901uc_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001902 htt_detach(pdev->htt_pdev);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301903htt_attach_fail:
1904 ol_tx_desc_dup_detect_deinit(pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001905ol_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001906 return ret; /* fail */
1907}
1908
Dhanashri Atre12a08392016-02-17 13:10:34 -08001909/**
1910 * ol_txrx_pdev_attach_target() - send target configuration
1911 *
1912 * @pdev - the physical device being initialized
1913 *
1914 * The majority of the data SW setup is done by the pdev_attach
1915 * functions, but this function completes the data SW setup by
1916 * sending datapath configuration messages to the target.
1917 *
1918 * Return: 0 - success, 1 - failure
1919 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001920static A_STATUS ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001921{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001922 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001923
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +05301924 return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0:1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001925}
1926
Dhanashri Atre12a08392016-02-17 13:10:34 -08001927/**
Mohit Khanna54f3a382017-03-13 17:56:32 -07001928 * ol_tx_free_descs_inuse - free tx descriptors which are in use
1929 * @pdev - the physical device for which tx descs need to be freed
1930 *
1931 * Cycle through the list of TX descriptors (for a pdev) which are in use,
1932 * for which TX completion has not been received and free them. Should be
1933 * called only when the interrupts are off and all lower layer RX is stopped.
1934 * Otherwise there may be a race condition with TX completions.
1935 *
1936 * Return: None
1937 */
1938static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
1939{
1940 int i;
1941 void *htt_tx_desc;
1942 struct ol_tx_desc_t *tx_desc;
1943 int num_freed_tx_desc = 0;
1944
1945 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1946 tx_desc = ol_tx_desc_find(pdev, i);
1947 /*
1948 * Confirm that each tx descriptor is "empty", i.e. it has
1949 * no tx frame attached.
1950 * In particular, check that there are no frames that have
1951 * been given to the target to transmit, for which the
1952 * target has never provided a response.
1953 */
1954 if (qdf_atomic_read(&tx_desc->ref_cnt)) {
1955 ol_txrx_dbg("Warning: freeing tx frame (no compltn)");
1956 ol_tx_desc_frame_free_nonstd(pdev,
1957 tx_desc, 1);
1958 num_freed_tx_desc++;
1959 }
1960 htt_tx_desc = tx_desc->htt_tx_desc;
1961 htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
1962 }
1963
1964 if (num_freed_tx_desc)
1965 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1966 "freed %d tx frames for which no resp from target",
1967 num_freed_tx_desc);
1968
1969}
1970
1971/**
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301972 * ol_txrx_pdev_pre_detach() - detach the data SW state
Dhanashri Atre12a08392016-02-17 13:10:34 -08001973 * @pdev - the data physical device object being removed
1974 * @force - delete the pdev (and its vdevs and peers) even if
1975 * there are outstanding references by the target to the vdevs
1976 * and peers within the pdev
1977 *
1978 * This function is used when the WLAN driver is being removed to
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301979 * detach the host data component within the driver.
Dhanashri Atre12a08392016-02-17 13:10:34 -08001980 *
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301981 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08001982 */
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301983static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001984{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001985 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001986
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001987 /* preconditions */
1988 TXRX_ASSERT2(pdev);
1989
1990 /* check that the pdev has no vdevs allocated */
1991 TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
1992
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001993#ifdef QCA_SUPPORT_TX_THROTTLE
1994 /* Thermal Mitigation */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301995 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
1996 qdf_timer_free(&pdev->tx_throttle.phase_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001997#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301998 qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1999 qdf_timer_free(&pdev->tx_throttle.tx_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002000#endif
2001#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002002
2003 if (force) {
2004 /*
2005 * The assertion above confirms that all vdevs within this pdev
2006 * were detached. However, they may not have actually been
2007 * deleted.
2008 * If the vdev had peers which never received a PEER_UNMAP msg
2009 * from the target, then there are still zombie peer objects,
2010 * and the vdev parents of the zombie peers are also zombies,
2011 * hanging around until their final peer gets deleted.
2012 * Go through the peer hash table and delete any peers left.
2013 * As a side effect, this will complete the deletion of any
2014 * vdevs that are waiting for their peers to finish deletion.
2015 */
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002016 ol_txrx_dbg("Force delete for pdev %pK\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002017 pdev);
2018 ol_txrx_peer_find_hash_erase(pdev);
2019 }
2020
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05302021 /* to get flow pool status before freeing descs */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07002022 ol_tx_dump_flow_pool_info((void *)pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07002023 ol_tx_free_descs_inuse(pdev);
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05302024 ol_tx_deregister_flow_control(pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07002025
2026 /*
2027 * ol_tso_seg_list_deinit should happen after
2028 * ol_tx_free_descs_inuse as it tries to access the tso seg freelist
2029 * which is being de-initialized in ol_tso_seg_list_deinit
2030 */
2031 ol_tso_seg_list_deinit(pdev);
2032 ol_tso_num_seg_list_deinit(pdev);
2033
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05302034 /* Stop the communication between HTT and target at first */
2035 htt_detach_target(pdev->htt_pdev);
2036
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302037 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07002038 &pdev->tx_desc.desc_pages, 0, true);
2039 pdev->tx_desc.freelist = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002040
2041 /* Detach micro controller data path offload resource */
2042 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
2043 htt_ipa_uc_detach(pdev->htt_pdev);
2044
2045 htt_detach(pdev->htt_pdev);
Nirav Shah76291962016-04-25 10:50:37 +05302046 ol_tx_desc_dup_detect_deinit(pdev);
2047
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302048 qdf_spinlock_destroy(&pdev->tx_mutex);
2049 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
2050 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
2051 qdf_spinlock_destroy(&pdev->rx.mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07002052 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002053#ifdef QCA_SUPPORT_TX_THROTTLE
2054 /* Thermal Mitigation */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302055 qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002056#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302057
2058 /* TX flow control for peer who is in very bad link status */
2059 ol_tx_badpeer_flow_cl_deinit(pdev);
2060
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002061 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
2062
2063 OL_RX_REORDER_TRACE_DETACH(pdev);
2064 OL_RX_PN_TRACE_DETACH(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302065
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002066 /*
2067 * WDI event detach
2068 */
2069 wdi_event_detach(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302070
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002071 ol_txrx_local_peer_id_cleanup(pdev);
2072
2073#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302074 qdf_spinlock_destroy(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002075#endif
Houston Hoffmane5ec0492017-01-30 12:28:32 -08002076 qdf_mem_free(ppdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002077}
2078
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302079/**
2080 * ol_txrx_pdev_detach() - delete the data SW state
2081 * @ppdev - the data physical device object being removed
2082 * @force - delete the pdev (and its vdevs and peers) even if
2083 * there are outstanding references by the target to the vdevs
2084 * and peers within the pdev
2085 *
2086 * This function is used when the WLAN driver is being removed to
2087 * remove the host data component within the driver.
2088 * All virtual devices within the physical device need to be deleted
2089 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
2090 *
2091 * Return: None
2092 */
2093static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
2094{
2095 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
tfyu9fcabd72017-09-26 17:46:48 +08002096 struct ol_txrx_stats_req_internal *req;
2097 int i = 0;
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302098
2099 /* check that the txrx pdev structure is not NULL */
2100 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302101 ol_txrx_err(
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302102 "NULL pdev passed to %s\n", __func__);
2103 return;
2104 }
2105
2106 htt_pktlogmod_exit(pdev);
2107
tfyu9fcabd72017-09-26 17:46:48 +08002108 qdf_spin_lock_bh(&pdev->req_list_spinlock);
2109 if (pdev->req_list_depth > 0)
2110 ol_txrx_err(
2111 "Warning: the txrx req list is not empty, depth=%d\n",
2112 pdev->req_list_depth
2113 );
2114 TAILQ_FOREACH(req, &pdev->req_list, req_list_elem) {
2115 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
2116 pdev->req_list_depth--;
2117 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05302118 "%d: %pK,verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)\n",
tfyu9fcabd72017-09-26 17:46:48 +08002119 i++,
2120 req,
2121 req->base.print.verbose,
2122 req->base.print.concise,
2123 req->base.stats_type_upload_mask,
2124 req->base.stats_type_reset_mask
2125 );
2126 qdf_mem_free(req);
2127 }
2128 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
2129
2130 qdf_spinlock_destroy(&pdev->req_list_spinlock);
2131
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302132 OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
2133
2134 if (pdev->cfg.is_high_latency)
2135 ol_tx_sched_detach(pdev);
2136
2137 htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
2138
2139 htt_pdev_free(pdev->htt_pdev);
2140 ol_txrx_peer_find_detach(pdev);
2141 ol_txrx_tso_stats_deinit(pdev);
2142
2143 ol_txrx_pdev_txq_log_destroy(pdev);
2144 ol_txrx_pdev_grp_stat_destroy(pdev);
2145}
2146
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302147#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
2148
2149/**
2150 * ol_txrx_vdev_tx_desc_cnt_init() - initialise tx descriptor count for vdev
2151 * @vdev: the virtual device object
2152 *
2153 * Return: None
2154 */
2155static inline void
2156ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
2157{
2158 qdf_atomic_init(&vdev->tx_desc_count);
2159}
2160#else
2161
2162static inline void
2163ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
2164{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302165}
2166#endif
2167
Dhanashri Atre12a08392016-02-17 13:10:34 -08002168/**
2169 * ol_txrx_vdev_attach - Allocate and initialize the data object
2170 * for a new virtual device.
2171 *
2172 * @data_pdev - the physical device the virtual device belongs to
2173 * @vdev_mac_addr - the MAC address of the virtual device
2174 * @vdev_id - the ID used to identify the virtual device to the target
2175 * @op_mode - whether this virtual device is operating as an AP,
2176 * an IBSS, or a STA
2177 *
2178 * Return: success: handle to new data vdev object, failure: NULL
2179 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002180static struct cdp_vdev *
2181ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002182 uint8_t *vdev_mac_addr,
2183 uint8_t vdev_id, enum wlan_op_mode op_mode)
2184{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002185 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002186 struct ol_txrx_vdev_t *vdev;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002187 QDF_STATUS qdf_status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002188
2189 /* preconditions */
2190 TXRX_ASSERT2(pdev);
2191 TXRX_ASSERT2(vdev_mac_addr);
2192
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302193 vdev = qdf_mem_malloc(sizeof(*vdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002194 if (!vdev)
2195 return NULL; /* failure */
2196
2197 /* store provided params */
2198 vdev->pdev = pdev;
2199 vdev->vdev_id = vdev_id;
2200 vdev->opmode = op_mode;
2201
2202 vdev->delete.pending = 0;
2203 vdev->safemode = 0;
2204 vdev->drop_unenc = 1;
2205 vdev->num_filters = 0;
Himanshu Agarwal5ac2f7b2016-05-06 20:08:10 +05302206 vdev->fwd_tx_packets = 0;
2207 vdev->fwd_rx_packets = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002208
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302209 ol_txrx_vdev_tx_desc_cnt_init(vdev);
2210
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302211 qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002212 OL_TXRX_MAC_ADDR_LEN);
2213
2214 TAILQ_INIT(&vdev->peer_list);
2215 vdev->last_real_peer = NULL;
2216
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002217 ol_txrx_hl_tdls_flag_reset((struct cdp_vdev *)vdev, false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302218
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002219#ifdef QCA_IBSS_SUPPORT
2220 vdev->ibss_peer_num = 0;
2221 vdev->ibss_peer_heart_beat_timer = 0;
2222#endif
2223
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302224 ol_txrx_vdev_txqs_init(vdev);
2225
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302226 qdf_spinlock_create(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002227 vdev->ll_pause.paused_reason = 0;
2228 vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
2229 vdev->ll_pause.txq.depth = 0;
wadesong5e2e8012017-08-21 16:56:03 +08002230 qdf_atomic_init(&vdev->delete.detaching);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302231 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002232 &vdev->ll_pause.timer,
2233 ol_tx_vdev_ll_pause_queue_send, vdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302234 QDF_TIMER_TYPE_SW);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302235 qdf_atomic_init(&vdev->os_q_paused);
2236 qdf_atomic_set(&vdev->os_q_paused, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002237 vdev->tx_fl_lwm = 0;
2238 vdev->tx_fl_hwm = 0;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002239 vdev->rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002240 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Abhishek Singh217d9782017-04-28 23:49:11 +05302241 qdf_mem_zero(&vdev->last_peer_mac_addr,
2242 sizeof(union ol_txrx_align_mac_addr_t));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302243 qdf_spinlock_create(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002244 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08002245 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002246 vdev->osif_fc_ctx = NULL;
2247
2248 /* Default MAX Q depth for every VDEV */
2249 vdev->ll_pause.max_q_depth =
2250 ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002251 qdf_status = qdf_event_create(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002252 /* add this vdev into the pdev's list */
2253 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
2254
Poddar, Siddarth14521792017-03-14 21:19:42 +05302255 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002256 "Created vdev %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002257 vdev,
2258 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2259 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2260 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2261
2262 /*
2263 * We've verified that htt_op_mode == wlan_op_mode,
2264 * so no translation is needed.
2265 */
2266 htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
2267
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002268 return (struct cdp_vdev *)vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002269}
2270
Dhanashri Atre12a08392016-02-17 13:10:34 -08002271/**
2272 * ol_txrx_vdev_register - Link a vdev's data object with the
2273 * matching OS shim vdev object.
2274 *
2275 * @txrx_vdev: the virtual device's data object
2276 * @osif_vdev: the virtual device's OS shim object
2277 * @txrx_ops: (pointers to) functions used for tx and rx data xfer
2278 *
2279 * The data object for a virtual device is created by the
2280 * function ol_txrx_vdev_attach. However, rather than fully
2281 * linking the data vdev object with the vdev objects from the
2282 * other subsystems that the data vdev object interacts with,
2283 * the txrx_vdev_attach function focuses primarily on creating
2284 * the data vdev object. After the creation of both the data
2285 * vdev object and the OS shim vdev object, this
2286 * txrx_osif_vdev_attach function is used to connect the two
2287 * vdev objects, so the data SW can use the OS shim vdev handle
2288 * when passing rx data received by a vdev up to the OS shim.
2289 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002290static void ol_txrx_vdev_register(struct cdp_vdev *pvdev,
2291 void *osif_vdev,
2292 struct ol_txrx_ops *txrx_ops)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002293{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002294 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002295
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002296 if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
2297 qdf_print("%s: vdev/txrx_ops is NULL!\n", __func__);
2298 qdf_assert(0);
2299 return;
2300 }
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002301
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002302 vdev->osif_dev = osif_vdev;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002303 vdev->rx = txrx_ops->rx.rx;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002304 txrx_ops->tx.tx = ol_tx_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002305}
2306
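/*
 * Usage sketch (hypothetical shim-side caller; my_shim_rx_cb and
 * my_osif_dev are placeholders, not symbols from this driver):
 *
 *	struct ol_txrx_ops txrx_ops = { 0 };
 *	struct cdp_vdev *vdev;
 *
 *	vdev = ol_txrx_vdev_attach(pdev, mac_addr, vdev_id, wlan_op_mode_sta);
 *	txrx_ops.rx.rx = my_shim_rx_cb;
 *	ol_txrx_vdev_register(vdev, my_osif_dev, &txrx_ops);
 *	// txrx_ops.tx.tx is now filled in with ol_tx_data for the shim to use
 */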
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002307#ifdef currently_unused
Dhanashri Atre12a08392016-02-17 13:10:34 -08002308/**
2309 * ol_txrx_set_curchan - Setup the current operating channel of
2310 * the device
2311 * @pdev - the data physical device object
2312 * @chan_mhz - the channel frequency (MHz) that packets are sent on
2313 *
2314 * Mainly used when populating monitor mode status that requires
2315 * the current operating channel
2316 *
2317 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002318void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz)
2319{
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002320}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002321#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002322
2323void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
2324{
2325 vdev->safemode = val;
2326}
2327
Dhanashri Atre12a08392016-02-17 13:10:34 -08002328/**
2329 * ol_txrx_set_privacy_filters - set the privacy filter
2330 * @vdev - the data virtual device object
2331 * @filter - filters to be set
2332 * @num - the number of filters
2333 *
2334 * Rx related. Set the privacy filters. When receiving packets, check
2335 * the ether type, filter type and packet type to decide whether to
2336 * discard these packets.
2337 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002338static void
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002339ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
2340 void *filters, uint32_t num)
2341{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302342 qdf_mem_copy(vdev->privacy_filters, filters,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002343 num * sizeof(struct privacy_exemption));
2344 vdev->num_filters = num;
2345}
2346
2347void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
2348{
2349 vdev->drop_unenc = val;
2350}
2351
gbian016a42e2017-03-01 18:49:11 +08002352#if defined(CONFIG_HL_SUPPORT)
2353
2354static void
2355ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2356{
2357 struct ol_txrx_pdev_t *pdev = vdev->pdev;
2358 int i;
2359 struct ol_tx_desc_t *tx_desc;
2360
2361 qdf_spin_lock_bh(&pdev->tx_mutex);
2362 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2363 tx_desc = ol_tx_desc_find(pdev, i);
2364 if (tx_desc->vdev == vdev)
2365 tx_desc->vdev = NULL;
2366 }
2367 qdf_spin_unlock_bh(&pdev->tx_mutex);
2368}
2369
2370#else
2371
2372static void
2373ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2374{
2375
2376}
2377
2378#endif
2379
Dhanashri Atre12a08392016-02-17 13:10:34 -08002380/**
2381 * ol_txrx_vdev_detach - Deallocate the specified data virtual
2382 * device object.
2383 * @data_vdev: data object for the virtual device in question
2384 * @callback: function to call (if non-NULL) once the vdev has
2385 * been wholly deleted
2386 * @callback_context: context to provide in the callback
2387 *
2388 * All peers associated with the virtual device need to be deleted
2389 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
2390 * However, for the peers to be fully deleted, the peer deletion has to
2391 * percolate through the target data FW and back up to the host data SW.
2392 * Thus, even though the host control SW may have issued a peer_detach
2393 * call for each of the vdev's peers, the peer objects may still be
2394 * allocated, pending removal of all references to them by the target FW.
2395 * In this case, though the vdev_detach function call will still return
2396 * immediately, the vdev itself won't actually be deleted, until the
2397 * deletions of all its peers complete.
2398 * The caller can provide a callback function pointer to be notified when
2399 * the vdev deletion actually happens - whether it's directly within the
2400 * vdev_detach call, or if it's deferred until all in-progress peer
2401 * deletions have completed.
2402 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002403static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002404ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002405 ol_txrx_vdev_delete_cb callback, void *context)
2406{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002407 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
wadesong5e2e8012017-08-21 16:56:03 +08002408 struct ol_txrx_pdev_t *pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002409
2410 /* preconditions */
2411 TXRX_ASSERT2(vdev);
wadesong5e2e8012017-08-21 16:56:03 +08002412 pdev = vdev->pdev;
2413
2414 /* prevent anyone from restarting the ll_pause timer again */
2415 qdf_atomic_set(&vdev->delete.detaching, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002416
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302417 ol_txrx_vdev_tx_queue_free(vdev);
2418
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302419 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302420 qdf_timer_stop(&vdev->ll_pause.timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002421 vdev->ll_pause.is_q_timer_on = false;
2422 while (vdev->ll_pause.txq.head) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302423 qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
Yun Parkeaea8632017-04-09 09:53:45 -07002424
Nirav Shahcbc6d722016-03-01 16:24:53 +05302425 qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
2426 qdf_nbuf_unmap(pdev->osdev, vdev->ll_pause.txq.head,
Anurag Chouhandf2b2682016-02-29 14:15:27 +05302427 QDF_DMA_TO_DEVICE);
Nirav Shahcbc6d722016-03-01 16:24:53 +05302428 qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002429 vdev->ll_pause.txq.head = next;
2430 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302431 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
wadesong5e2e8012017-08-21 16:56:03 +08002432
2433 /* ll_pause timer should be deleted without any locks held, and
2434 * no timer function should be executed after this point because
2435 * qdf_timer_free is deleting the timer synchronously.
2436 */
2437 qdf_timer_free(&vdev->ll_pause.timer);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302438 qdf_spinlock_destroy(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002439
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302440 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002441 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08002442 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002443 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302444 qdf_spin_unlock_bh(&vdev->flow_control_lock);
2445 qdf_spinlock_destroy(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002446
2447 /* remove the vdev from its parent pdev's list */
2448 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
2449
2450 /*
2451 * Use peer_ref_mutex while accessing peer_list, in case
2452 * a peer is in the process of being removed from the list.
2453 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302454 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002455 /* check that the vdev has no peers allocated */
2456 if (!TAILQ_EMPTY(&vdev->peer_list)) {
2457 /* debug print - will be removed later */
Poddar, Siddarth14521792017-03-14 21:19:42 +05302458 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002459 "%s: not deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002460 __func__, vdev,
2461 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2462 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2463 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2464 /* indicate that the vdev needs to be deleted */
2465 vdev->delete.pending = 1;
2466 vdev->delete.callback = callback;
2467 vdev->delete.context = context;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302468 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002469 return;
2470 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302471 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002472 qdf_event_destroy(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002473
Poddar, Siddarth14521792017-03-14 21:19:42 +05302474 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002475 "%s: deleting vdev obj %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002476 __func__, vdev,
2477 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2478 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2479 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2480
2481 htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
2482
2483 /*
Yun Parkeaea8632017-04-09 09:53:45 -07002484 * ol_tx_desc_free might access invalid vdev content referred to by a
2485 * tx desc, since this vdev might be detached asynchronously in another
2486 * thread.
2487 *
2488 * Go through the tx desc pool and set each such tx desc's vdev to NULL
2489 * when detaching this vdev, and add a vdev check in ol_tx_desc_free
2490 * to avoid a crash.
2491 *
2492 */
gbian016a42e2017-03-01 18:49:11 +08002493 ol_txrx_tx_desc_reset_vdev(vdev);
2494
2495 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002496 * Doesn't matter if there are outstanding tx frames -
2497 * they will be freed once the target sends a tx completion
2498 * message for them.
2499 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302500 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002501 if (callback)
2502 callback(context);
2503}
2504
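/*
 * Usage sketch (hypothetical caller; my_vdev_delete_done and delete_done
 * are placeholders): the delete callback fires either synchronously, when
 * no peers are pending, or later once the last peer unmap completes.
 *
 *	static void my_vdev_delete_done(void *ctx)
 *	{
 *		*(bool *)ctx = true;
 *	}
 *	...
 *	ol_txrx_vdev_detach((struct cdp_vdev *)vdev, my_vdev_delete_done,
 *			    &delete_done);
 */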
2505/**
2506 * ol_txrx_flush_rx_frames() - flush cached rx frames
2507 * @peer: peer
2508 * @drop: set flag to drop frames
2509 *
2510 * Return: None
2511 */
2512void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302513 bool drop)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002514{
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002515 struct ol_txrx_cached_bufq_t *bufqi;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002516 struct ol_rx_cached_buf *cache_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302517 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002518 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002519
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302520 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
2521 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002522 return;
2523 }
2524
Dhanashri Atre182b0272016-02-17 15:35:07 -08002525 qdf_assert(peer->vdev);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302526 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002527 bufqi = &peer->bufq_info;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002528
Dhanashri Atre50141c52016-04-07 13:15:29 -07002529 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
Dhanashri Atre182b0272016-02-17 15:35:07 -08002530 data_rx = peer->vdev->rx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002531 else
2532 drop = true;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302533 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002534
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002535 qdf_spin_lock_bh(&bufqi->bufq_lock);
2536 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002537 typeof(*cache_buf), list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002538 while (!list_empty(&bufqi->cached_bufq)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002539 list_del(&cache_buf->list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002540 bufqi->curr--;
2541 qdf_assert(bufqi->curr >= 0);
2542 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002543 if (drop) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302544 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002545 } else {
2546 /* Flush the cached frames to HDD */
Dhanashri Atre182b0272016-02-17 15:35:07 -08002547 ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302548 if (ret != QDF_STATUS_SUCCESS)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302549 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002550 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302551 qdf_mem_free(cache_buf);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002552 qdf_spin_lock_bh(&bufqi->bufq_lock);
2553 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002554 typeof(*cache_buf), list);
2555 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002556 bufqi->qdepth_no_thresh = bufqi->curr;
2557 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302558 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002559}
2560
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07002561static void ol_txrx_flush_cache_rx_queue(void)
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302562{
2563 uint8_t sta_id;
2564 struct ol_txrx_peer_t *peer;
2565 struct ol_txrx_pdev_t *pdev;
2566
2567 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
2568 if (!pdev)
2569 return;
2570
2571 for (sta_id = 0; sta_id < WLAN_MAX_STA_COUNT; sta_id++) {
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002572 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
2573 sta_id);
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302574 if (!peer)
2575 continue;
2576 ol_txrx_flush_rx_frames(peer, 1);
2577 }
2578}
2579
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302580/* Define short name to use in cds_trigger_recovery */
2581#define PEER_DEL_TIMEOUT QDF_PEER_DELETION_TIMEDOUT
2582
Dhanashri Atre12a08392016-02-17 13:10:34 -08002583/**
Naveen Rawat17c42a82018-02-01 19:18:27 -08002584 * ol_txrx_dump_peer_access_list() - dump peer access list
2585 * @peer: peer handle
2586 *
2587 * This function dumps any peer debug ids that still hold references to the peer
2588 *
2589 * Return: None
2590 */
2591static void ol_txrx_dump_peer_access_list(ol_txrx_peer_handle peer)
2592{
2593 u32 i;
2594 u32 pending_ref;
2595
2596 for (i = 0; i < PEER_DEBUG_ID_MAX; i++) {
2597 pending_ref = qdf_atomic_read(&peer->access_list[i]);
2598 if (pending_ref)
2599 ol_txrx_info_high("id %d pending refs %d",
2600 i, pending_ref);
2601 }
2602}
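
/*
 * Hedged usage sketch (illustrative only, not a caller in this file): the
 * access_list counters dumped above are driven by the get/release pairing
 * used elsewhere in this file, e.g.
 *
 *	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
 *						    PEER_DEBUG_ID_OL_INTERNAL);
 *	if (peer) {
 *		... use peer ...
 *		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *	}
 */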
2603
2604/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08002605 * ol_txrx_peer_attach - Allocate and set up references for a
2606 * data peer object.
2607 * @data_pdev: data physical device object that will indirectly
2608 * own the data_peer object
2609 * @data_vdev - data virtual device object that will directly
2610 * own the data_peer object
2611 * @peer_mac_addr - MAC address of the new peer
2612 *
2613 * When an association with a peer starts, the host's control SW
2614 * uses this function to inform the host data SW.
2615 * The host data SW allocates its own peer object, and stores a
2616 * reference to the control peer object within the data peer object.
2617 * The host data SW also stores a reference to the virtual device
2618 * that the peer is associated with. This virtual device handle is
2619 * used when the data SW delivers rx data frames to the OS shim layer.
2620 * The host data SW returns a handle to the new peer data object,
2621 * so a reference within the control peer object can be set to the
2622 * data peer object.
2623 *
2624 * Return: handle to new data peer object, or NULL if the attach
2625 * fails
2626 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002627static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002628ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002629{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002630 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002631 struct ol_txrx_peer_t *peer;
2632 struct ol_txrx_peer_t *temp_peer;
2633 uint8_t i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002634 bool wait_on_deletion = false;
2635 unsigned long rc;
Dhanashri Atre12a08392016-02-17 13:10:34 -08002636 struct ol_txrx_pdev_t *pdev;
Abhishek Singh217d9782017-04-28 23:49:11 +05302637 bool cmp_wait_mac = false;
2638 uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002639
2640 /* preconditions */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002641 TXRX_ASSERT2(vdev);
2642 TXRX_ASSERT2(peer_mac_addr);
2643
Dhanashri Atre12a08392016-02-17 13:10:34 -08002644 pdev = vdev->pdev;
2645 TXRX_ASSERT2(pdev);
2646
Abhishek Singh217d9782017-04-28 23:49:11 +05302647 if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
2648 QDF_MAC_ADDR_SIZE))
2649 cmp_wait_mac = true;
2650
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302651 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002652 /* check for duplicate existing peer */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002653 TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2654 if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
2655 (union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302656 ol_txrx_info_high(
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002657 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exists.\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002658 vdev->vdev_id,
2659 peer_mac_addr[0], peer_mac_addr[1],
2660 peer_mac_addr[2], peer_mac_addr[3],
2661 peer_mac_addr[4], peer_mac_addr[5]);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302662 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002663 vdev->wait_on_peer_id = temp_peer->local_id;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002664 qdf_event_reset(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002665 wait_on_deletion = true;
Abhishek Singh217d9782017-04-28 23:49:11 +05302666 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002667 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302668 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002669 return NULL;
2670 }
2671 }
Abhishek Singh217d9782017-04-28 23:49:11 +05302672 if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
2673 &temp_peer->mac_addr,
2674 &vdev->last_peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302675 ol_txrx_info_high(
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002676 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) old peer exists.\n",
Abhishek Singh217d9782017-04-28 23:49:11 +05302677 vdev->vdev_id,
2678 vdev->last_peer_mac_addr.raw[0],
2679 vdev->last_peer_mac_addr.raw[1],
2680 vdev->last_peer_mac_addr.raw[2],
2681 vdev->last_peer_mac_addr.raw[3],
2682 vdev->last_peer_mac_addr.raw[4],
2683 vdev->last_peer_mac_addr.raw[5]);
2684 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2685 vdev->wait_on_peer_id = temp_peer->local_id;
2686 qdf_event_reset(&vdev->wait_delete_comp);
2687 wait_on_deletion = true;
2688 break;
2689 } else {
2690 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2691 ol_txrx_err("peer not found");
2692 return NULL;
2693 }
2694 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002695 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302696 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002697
Abhishek Singh217d9782017-04-28 23:49:11 +05302698 qdf_mem_zero(&vdev->last_peer_mac_addr,
2699 sizeof(union ol_txrx_align_mac_addr_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002700 if (wait_on_deletion) {
2701 /* wait for peer deletion */
Nachiket Kukade0396b732017-11-14 16:35:16 +05302702 rc = qdf_wait_for_event_completion(&vdev->wait_delete_comp,
Prakash Manjunathappad3ccca22016-05-05 19:23:19 -07002703 PEER_DELETION_TIMEOUT);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002704 if (QDF_STATUS_SUCCESS != rc) {
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002705 ol_txrx_err("error waiting for peer_id(%d) deletion, status %d\n",
Dustin Brown100201e2017-07-10 11:48:40 -07002706 vdev->wait_on_peer_id, (int) rc);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002707 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08002708 ol_txrx_dump_peer_access_list(temp_peer);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002709 wlan_roam_debug_dump_table();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002710 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Dustin Brown100201e2017-07-10 11:48:40 -07002711
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002712 return NULL;
2713 }
2714 }
2715
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302716 peer = qdf_mem_malloc(sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002717 if (!peer)
2718 return NULL; /* failure */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002719
2720 /* store provided params */
2721 peer->vdev = vdev;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302722 qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002723 OL_TXRX_MAC_ADDR_LEN);
2724
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302725 ol_txrx_peer_txqs_init(pdev, peer);
2726
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002727 INIT_LIST_HEAD(&peer->bufq_info.cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302728 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002729 /* add this peer into the vdev's list */
2730 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302731 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002732 /* check whether this is a real peer (peer mac addr != vdev mac addr) */
Frank Liu4362e462018-01-16 11:51:55 +08002733 if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr)) {
2734 qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002735 vdev->last_real_peer = peer;
Frank Liu4362e462018-01-16 11:51:55 +08002736 qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
2737 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002738
2739 peer->rx_opt_proc = pdev->rx_opt_proc;
2740
2741 ol_rx_peer_init(pdev, peer);
2742
2743 /* initialize the peer_id */
2744 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2745 peer->peer_ids[i] = HTT_INVALID_PEER;
2746
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302747 qdf_spinlock_create(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002748 qdf_spinlock_create(&peer->bufq_info.bufq_lock);
2749
2750 peer->bufq_info.thresh = OL_TXRX_CACHED_BUFQ_THRESH;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002751
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302752 qdf_atomic_init(&peer->delete_in_progress);
2753 qdf_atomic_init(&peer->flush_in_progress);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302754 qdf_atomic_init(&peer->ref_cnt);
Mohit Khannab7bec722017-11-10 11:43:44 -08002755
2756 for (i = 0; i < PEER_DEBUG_ID_MAX; i++)
2757 qdf_atomic_init(&peer->access_list[i]);
2758
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002759 /* keep one reference for attach */
Mohit Khannab7bec722017-11-10 11:43:44 -08002760 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002761
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002762 /* Set a flag to indicate peer create is pending in firmware */
Prakash Dhavali0d3f1d62016-11-20 23:48:24 -08002763 qdf_atomic_init(&peer->fw_create_pending);
2764 qdf_atomic_set(&peer->fw_create_pending, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002765
2766 peer->valid = 1;
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07002767 qdf_timer_init(pdev->osdev, &peer->peer_unmap_timer,
2768 peer_unmap_timer_handler, peer, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002769
2770 ol_txrx_peer_find_hash_add(pdev, peer);
2771
Mohit Khanna47384bc2016-08-15 15:37:05 -07002772 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002773 "vdev %pK created peer %pK ref_cnt %d (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Mohit Khanna47384bc2016-08-15 15:37:05 -07002774 vdev, peer, qdf_atomic_read(&peer->ref_cnt),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002775 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2776 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2777 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2778 /*
2779	 * For every peer MAP message, check and set bss_peer
2780 */
Ankit Guptaa5076012016-09-14 11:32:19 -07002781 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
2782 OL_TXRX_MAC_ADDR_LEN))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002783 peer->bss_peer = 1;
2784
2785 /*
2786 * The peer starts in the "disc" state while association is in progress.
2787 * Once association completes, the peer will get updated to "auth" state
2788 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2789 * or else to the "conn" state. For non-open mode, the peer will
2790 * progress to "auth" state once the authentication completes.
2791 */
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002792 peer->state = OL_TXRX_PEER_STATE_INVALID;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002793 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002794 OL_TXRX_PEER_STATE_DISC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002795
2796#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2797 peer->rssi_dbm = HTT_RSSI_INVALID;
2798#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002799 if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2800 !pdev->self_peer) {
2801 pdev->self_peer = peer;
2802 /*
2803 * No Tx in monitor mode, otherwise results in target assert.
2804 * Setting disable_intrabss_fwd to true
2805 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002806 ol_vdev_rx_set_intrabss_fwd((struct cdp_vdev *)vdev, true);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002807 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002808
2809 ol_txrx_local_peer_id_alloc(pdev, peer);
2810
Leo Chang98726762016-10-28 11:07:18 -07002811 return (void *)peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002812}
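
/*
 * Illustrative control-path sketch (hedged; the actual caller lives outside
 * this file): after a successful attach, the peer is normally moved out of
 * the "disc" state via ol_txrx_peer_state_update(), mirroring the state
 * machine described in the comment above, e.g.
 *
 *	void *peer = ol_txrx_peer_attach(pvdev, peer_mac);
 *
 *	if (peer)
 *		ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer_mac,
 *					  OL_TXRX_PEER_STATE_CONN);
 */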
2813
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302814#undef PEER_DEL_TIMEOUT
2815
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002816/*
2817 * Discarding tx filter - removes all data frames (disconnected state)
2818 */
2819static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2820{
2821 return A_ERROR;
2822}
2823
2824/*
2825 * Non-authentication tx filter - filters out data frames that are not
2826 * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2827 * data frames (connected state)
2828 */
2829static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2830{
2831 return
2832 (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2833 tx_msdu_info->htt.info.ethertype ==
2834 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2835}
2836
2837/*
2838 * Pass-through tx filter - lets all data frames through (authenticated state)
2839 */
2840static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2841{
2842 return A_OK;
2843}
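
/*
 * Informational summary (derived from ol_txrx_peer_state_update() below):
 * the peer state selects one of the filters above:
 *	OL_TXRX_PEER_STATE_AUTH -> ol_tx_filter_pass_thru
 *	OL_TXRX_PEER_STATE_CONN -> ol_tx_filter_non_auth
 *	other states            -> ol_tx_filter_discard
 */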
2844
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002845/**
2846 * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2847 * @peer: handle to peer
2848 *
2849 * Returns the MAC address for modules that do not know the peer type
2850 *
2851 * Return: the mac_addr from peer
2852 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002853static uint8_t *
Leo Chang98726762016-10-28 11:07:18 -07002854ol_txrx_peer_get_peer_mac_addr(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002855{
Leo Chang98726762016-10-28 11:07:18 -07002856 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002857
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002858 if (!peer)
2859 return NULL;
2860
2861 return peer->mac_addr.raw;
2862}
2863
Abhishek Singhcfb44482017-03-10 12:42:37 +05302864#ifdef WLAN_FEATURE_11W
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002865/**
2866 * ol_txrx_get_pn_info() - Returns pn info from peer
2867 * @peer: handle to peer
2868 * @last_pn_valid: return last_rmf_pn_valid value from peer.
2869 * @last_pn: return last_rmf_pn value from peer.
2870 * @rmf_pn_replays: return rmf_pn_replays value from peer.
2871 *
2872 * Return: NONE
2873 */
2874void
Leo Chang98726762016-10-28 11:07:18 -07002875ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002876 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2877{
Leo Chang98726762016-10-28 11:07:18 -07002878 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002879 *last_pn_valid = &peer->last_rmf_pn_valid;
2880 *last_pn = &peer->last_rmf_pn;
2881 *rmf_pn_replays = &peer->rmf_pn_replays;
2882}
Abhishek Singhcfb44482017-03-10 12:42:37 +05302883#else
2884void
2885ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
2886 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2887{
2888}
2889#endif
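
/*
 * Hedged usage sketch (illustrative only): a caller interested in the 11W
 * PN replay state dereferences the pointers returned above, e.g.
 *
 *	uint8_t *pn_valid;
 *	uint64_t *last_pn;
 *	uint32_t *replays;
 *
 *	ol_txrx_get_pn_info(peer, &pn_valid, &last_pn, &replays);
 *	if (*pn_valid)
 *		... inspect *last_pn and *replays ...
 */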
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002890
2891/**
2892 * ol_txrx_get_opmode() - Return operation mode of vdev
2893 * @vdev: vdev handle
2894 *
2895 * Return: operation mode.
2896 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002897static int ol_txrx_get_opmode(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002898{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002899 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002900
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002901 return vdev->opmode;
2902}
2903
2904/**
2905 * ol_txrx_get_peer_state() - Return peer state of peer
2906 * @peer: peer handle
2907 *
2908 * Return: return peer state
2909 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002910static int ol_txrx_get_peer_state(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002911{
Leo Chang98726762016-10-28 11:07:18 -07002912 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002913
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002914 return peer->state;
2915}
2916
2917/**
2918 * ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
2919 * @peer: peer handle
2920 *
2921 * Return: vdev handle from peer
2922 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002923static struct cdp_vdev *ol_txrx_get_vdev_for_peer(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002924{
Leo Chang98726762016-10-28 11:07:18 -07002925 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002926
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002927 return (struct cdp_vdev *)peer->vdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002928}
2929
2930/**
2931 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
2932 * @vdev: vdev handle
2933 *
2934 * Return: vdev mac address
2935 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002936static uint8_t *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002937ol_txrx_get_vdev_mac_addr(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002938{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002939 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002940
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002941 if (!vdev)
2942 return NULL;
2943
2944 return vdev->mac_addr.raw;
2945}
2946
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002947#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002948/**
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002949 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002950 * vdev
2951 * @vdev: vdev handle
2952 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002953 * Return: Handle to struct qdf_mac_addr
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002954 */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002955struct qdf_mac_addr *
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002956ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
2957{
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002958 return (struct qdf_mac_addr *)&(vdev->mac_addr);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002959}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002960#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002961
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002962#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002963/**
2964 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
2965 * @vdev: vdev handle
2966 *
2967 * Return: Handle to pdev
2968 */
2969ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
2970{
2971 return vdev->pdev;
2972}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002973#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002974
2975/**
2976 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
2977 * @vdev: vdev handle
2978 *
2979 * Return: Handle to control pdev
2980 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002981static struct cdp_cfg *
2982ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002983{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002984 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002985
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002986 return vdev->pdev->ctrl_pdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002987}
2988
2989/**
2990 * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
2991 * @vdev: vdev handle
2992 *
2993 * Return: Rx Fwd disabled status
2994 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002995static uint8_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002996ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002997{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002998 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002999 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
3000 vdev->pdev->ctrl_pdev;
3001 return cfg->rx_fwd_disabled;
3002}
3003
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07003004#ifdef QCA_IBSS_SUPPORT
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003005/**
3006 * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
3007 * @vdev: vdev handle
3008 * @peer_num_delta: peer nums to be adjusted
3009 *
3010 * Return: OL_TXRX_INVALID_NUM_PEERS on failure, or the total peer count after adjustment.
3011 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003012static int16_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003013ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003014 int16_t peer_num_delta)
3015{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003016 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003017 int16_t new_peer_num;
3018
3019 new_peer_num = vdev->ibss_peer_num + peer_num_delta;
Naveen Rawatc45d1622016-07-05 12:20:09 -07003020 if (new_peer_num > MAX_PEERS || new_peer_num < 0)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003021 return OL_TXRX_INVALID_NUM_PEERS;
3022
3023 vdev->ibss_peer_num = new_peer_num;
3024
3025 return new_peer_num;
3026}
3027
3028/**
3029 * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
3030 * beat timer
3031 * @vdev: vdev handle
3032 * @timer_value_sec: new heart beat timer value
3033 *
3034 * Return: Old timer value set in vdev.
3035 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003036static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
3037 uint16_t timer_value_sec)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003038{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003039 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003040 uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
3041
3042 vdev->ibss_peer_heart_beat_timer = timer_value_sec;
3043
3044 return old_timer_value;
3045}
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07003046#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003047
3048/**
3049 * ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
3050 * @vdev: vdev handle
3051 * @callback: callback function to remove the peer.
3052 * @callback_context: handle for callback function
3053 * @remove_last_peer: whether the last (bss/self) peer should also be removed
3054 *
3055 * Return: NONE
3056 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003057static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003058ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003059 ol_txrx_vdev_peer_remove_cb callback,
3060 void *callback_context, bool remove_last_peer)
3061{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003062 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003063 ol_txrx_peer_handle peer, temp;
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07003064 int self_removed = 0;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003065 /* remove all remote peers for vdev */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003066 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003067
3068 temp = NULL;
3069 TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
3070 peer_list_elem) {
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05303071 if (qdf_atomic_read(&peer->delete_in_progress))
3072 continue;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003073 if (temp) {
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003074 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05303075 callback(callback_context, temp->mac_addr.raw,
Jiachao Wu641760e2018-01-21 12:11:31 +08003076 vdev->vdev_id, temp);
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003077 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003078 }
3079 /* self peer is deleted last */
3080 if (peer == TAILQ_FIRST(&vdev->peer_list)) {
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07003081 self_removed = 1;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003082 break;
Yun Parkeaea8632017-04-09 09:53:45 -07003083 }
3084 temp = peer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003085 }
3086
Mohit Khanna137b97d2016-04-21 16:11:33 -07003087 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
3088
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07003089 if (self_removed)
3090 ol_txrx_info("%s: self peer removed by caller ",
3091 __func__);
3092
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003093 if (remove_last_peer) {
3094 /* remove IBSS bss peer last */
3095 peer = TAILQ_FIRST(&vdev->peer_list);
3096 callback(callback_context, (uint8_t *) &vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08003097 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003098 }
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003099}
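
/*
 * Hedged sketch of a peer-remove callback (names are illustrative, not part
 * of this driver); it receives its arguments in the order used by the
 * callback invocations above:
 *
 *	static void example_peer_remove_cb(void *cb_ctx, uint8_t *peer_mac,
 *					   uint8_t vdev_id, void *peer)
 *	{
 *		... control-path bookkeeping for the removed peer ...
 *	}
 *
 *	ol_txrx_remove_peers_for_vdev(pvdev, example_peer_remove_cb,
 *				      cb_ctx, true);
 */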
3100
3101/**
3102 * ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
3103 * @vdev: vdev handle
3104 * @callback: callback function to remove the peer.
3105 * @callback_context: handle for callback function
3106 *
3107 * Return: NONE
3108 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003109static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003110ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003111 ol_txrx_vdev_peer_remove_cb callback,
3112 void *callback_context)
3113{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003114 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003115 ol_txrx_peer_handle peer = NULL;
Jiachao Wu641760e2018-01-21 12:11:31 +08003116 ol_txrx_peer_handle tmp_peer = NULL;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003117
Jiachao Wu641760e2018-01-21 12:11:31 +08003118 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, peer_list_elem, tmp_peer) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05303119 ol_txrx_info_high(
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003120 "%s: peer found for vdev id %d. deleting the peer",
3121 __func__, vdev->vdev_id);
3122 callback(callback_context, (uint8_t *)&vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08003123 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003124 }
3125}
3126
3127/**
3128 * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
3129 * @vdev: vdev handle
3130 * @ocb_set_chan: OCB channel information to be set in vdev.
3131 *
3132 * Return: NONE
3133 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003134static void ol_txrx_set_ocb_chan_info(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003135 struct ol_txrx_ocb_set_chan ocb_set_chan)
3136{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003137 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003138
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003139 vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
3140 vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
3141}
3142
3143/**
3144 * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
3145 * @vdev: vdev handle
3146 *
3147 * Return: handle to struct ol_txrx_ocb_chan_info
3148 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003149static struct ol_txrx_ocb_chan_info *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003150ol_txrx_get_ocb_chan_info(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003151{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003152 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07003153
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003154 return vdev->ocb_channel_info;
3155}
3156
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003157/**
3158 * @brief specify the peer's authentication state
3159 * @details
3160 * Specify the peer's authentication state (none, connected, authenticated)
3161 * to allow the data SW to determine whether to filter out invalid data frames.
3162 * (In the "connected" state, where security is enabled, but authentication
3163 * has not completed, tx and rx data frames other than EAPOL or WAPI should
3164 * be discarded.)
3165 * This function is only relevant for systems in which the tx and rx filtering
3166 * are done in the host rather than in the target.
3167 *
3168 * @param ppdev - data physical device the peer belongs to
 * @param peer_mac - MAC address of the peer whose state has changed
3169 * @param state - the new state of the peer
3170 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003171 * Return: QDF Status
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003172 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003173QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003174 uint8_t *peer_mac,
3175 enum ol_txrx_peer_state state)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003176{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003177 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003178 struct ol_txrx_peer_t *peer;
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003179 int peer_ref_cnt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003180
Anurag Chouhanc5548422016-02-24 18:33:27 +05303181 if (qdf_unlikely(!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303182 ol_txrx_err("Pdev is NULL");
Anurag Chouhanc5548422016-02-24 18:33:27 +05303183 qdf_assert(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303184 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003185 }
3186
Mohit Khannab7bec722017-11-10 11:43:44 -08003187 peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
3188 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003189 if (NULL == peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303190 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303191 "%s: peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
3192 __func__,
3193 peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
3194 peer_mac[4], peer_mac[5]);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303195 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003196 }
3197
3198 /* TODO: Should we send WMI command of the connection state? */
3199 /* avoid multiple auth state change. */
3200 if (peer->state == state) {
3201#ifdef TXRX_PRINT_VERBOSE_ENABLE
Poddar, Siddarth14521792017-03-14 21:19:42 +05303202 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003203 "%s: no state change, returns directly\n",
3204 __func__);
3205#endif
Mohit Khannab7bec722017-11-10 11:43:44 -08003206 peer_ref_cnt = ol_txrx_peer_release_ref
3207 (peer,
3208 PEER_DEBUG_ID_OL_INTERNAL);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303209 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003210 }
3211
Poddar, Siddarth14521792017-03-14 21:19:42 +05303212 ol_txrx_dbg("%s: change from %d to %d\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003213 __func__, peer->state, state);
3214
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003215 peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003216 ? ol_tx_filter_pass_thru
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003217 : ((state == OL_TXRX_PEER_STATE_CONN)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003218 ? ol_tx_filter_non_auth
3219 : ol_tx_filter_discard);
3220
3221 if (peer->vdev->pdev->cfg.host_addba) {
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003222 if (state == OL_TXRX_PEER_STATE_AUTH) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003223 int tid;
3224 /*
3225 * Pause all regular (non-extended) TID tx queues until
3226 * data arrives and ADDBA negotiation has completed.
3227 */
Poddar, Siddarth14521792017-03-14 21:19:42 +05303228 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003229 "%s: pause peer and unpause mgmt/non-qos\n",
3230 __func__);
3231 ol_txrx_peer_pause(peer); /* pause all tx queues */
3232 /* unpause mgmt and non-QoS tx queues */
3233 for (tid = OL_TX_NUM_QOS_TIDS;
3234 tid < OL_TX_NUM_TIDS; tid++)
3235 ol_txrx_peer_tid_unpause(peer, tid);
3236 }
3237 }
Mohit Khannab7bec722017-11-10 11:43:44 -08003238 peer_ref_cnt = ol_txrx_peer_release_ref(peer,
3239 PEER_DEBUG_ID_OL_INTERNAL);
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003240 /*
Mohit Khannab7bec722017-11-10 11:43:44 -08003241 * after ol_txrx_peer_release_ref, peer object cannot be accessed
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003242 * if the return code was 0
3243 */
Mohit Khannab04dfcd2017-02-13 18:54:35 -08003244 if (peer_ref_cnt > 0)
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003245 /*
3246 * Set the state after the Pause to avoid the race condiction
3247 * with ADDBA check in tx path
3248 */
3249 peer->state = state;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303250 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003251}
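
/*
 * Hedged usage sketch (illustrative only): once authentication completes,
 * the control path promotes the peer so the pass-through tx filter is
 * installed:
 *
 *	ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer_mac,
 *				  OL_TXRX_PEER_STATE_AUTH);
 */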
3252
3253void
3254ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
3255{
3256 peer->keyinstalled = val;
3257}
3258
3259void
3260ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
3261 uint8_t *peer_mac,
3262 union ol_txrx_peer_update_param_t *param,
3263 enum ol_txrx_peer_update_select_t select)
3264{
3265 struct ol_txrx_peer_t *peer;
3266
Mohit Khannab7bec722017-11-10 11:43:44 -08003267 peer = ol_txrx_peer_find_hash_find_get_ref(vdev->pdev, peer_mac, 0, 1,
3268 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003269 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303270 ol_txrx_dbg("%s: peer is null",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003271 __func__);
3272 return;
3273 }
3274
3275 switch (select) {
3276 case ol_txrx_peer_update_qos_capable:
3277 {
3278		/* Save qos_capable in the txrx peer here;
3279		 * it is saved again when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO comes.
3280		 */
3281 peer->qos_capable = param->qos_capable;
3282 /*
3283 * The following function call assumes that the peer has a
3284 * single ID. This is currently true, and
3285 * is expected to remain true.
3286 */
3287 htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
3288 peer->peer_ids[0],
3289 peer->qos_capable);
3290 break;
3291 }
3292 case ol_txrx_peer_update_uapsdMask:
3293 {
3294 peer->uapsd_mask = param->uapsd_mask;
3295 htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
3296 peer->peer_ids[0],
3297 peer->uapsd_mask);
3298 break;
3299 }
3300 case ol_txrx_peer_update_peer_security:
3301 {
3302 enum ol_sec_type sec_type = param->sec_type;
3303 enum htt_sec_type peer_sec_type = htt_sec_type_none;
3304
3305 switch (sec_type) {
3306 case ol_sec_type_none:
3307 peer_sec_type = htt_sec_type_none;
3308 break;
3309 case ol_sec_type_wep128:
3310 peer_sec_type = htt_sec_type_wep128;
3311 break;
3312 case ol_sec_type_wep104:
3313 peer_sec_type = htt_sec_type_wep104;
3314 break;
3315 case ol_sec_type_wep40:
3316 peer_sec_type = htt_sec_type_wep40;
3317 break;
3318 case ol_sec_type_tkip:
3319 peer_sec_type = htt_sec_type_tkip;
3320 break;
3321 case ol_sec_type_tkip_nomic:
3322 peer_sec_type = htt_sec_type_tkip_nomic;
3323 break;
3324 case ol_sec_type_aes_ccmp:
3325 peer_sec_type = htt_sec_type_aes_ccmp;
3326 break;
3327 case ol_sec_type_wapi:
3328 peer_sec_type = htt_sec_type_wapi;
3329 break;
3330 default:
3331 peer_sec_type = htt_sec_type_none;
3332 break;
3333 }
3334
3335 peer->security[txrx_sec_ucast].sec_type =
3336 peer->security[txrx_sec_mcast].sec_type =
3337 peer_sec_type;
3338
3339 break;
3340 }
3341 default:
3342 {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303343 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003344 "ERROR: unknown param %d in %s", select,
3345 __func__);
3346 break;
3347 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003348 } /* switch */
Mohit Khannab7bec722017-11-10 11:43:44 -08003349 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003350}
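
/*
 * Hedged usage sketch (illustrative only): updating a peer's UAPSD mask
 * through the generic update API above; the caller fills the union member
 * that matches the select value:
 *
 *	union ol_txrx_peer_update_param_t param;
 *
 *	param.uapsd_mask = uapsd_mask;
 *	ol_txrx_peer_update(vdev, peer_mac, &param,
 *			    ol_txrx_peer_update_uapsdMask);
 */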
3351
3352uint8_t
3353ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3354{
3355
3356 struct ol_txrx_peer_t *peer;
Yun Parkeaea8632017-04-09 09:53:45 -07003357
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003358 peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3359 if (peer)
3360 return peer->uapsd_mask;
3361 return 0;
3362}
3363
3364uint8_t
3365ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3366{
3367
3368 struct ol_txrx_peer_t *peer_t =
3369 ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3370 if (peer_t != NULL)
3371 return peer_t->qos_capable;
3372 return 0;
3373}
3374
Mohit Khannab7bec722017-11-10 11:43:44 -08003375/**
Mohit Khannab7bec722017-11-10 11:43:44 -08003376 * ol_txrx_peer_free_tids() - free tids for the peer
3377 * @peer: peer handle
3378 *
3379 * Return: None
3380 */
3381static inline void ol_txrx_peer_free_tids(ol_txrx_peer_handle peer)
3382{
3383 int i = 0;
3384 /*
3385 * 'array' is allocated in addba handler and is supposed to be
3386 * freed in delba handler. There is the case (for example, in
3387 * SSR) where delba handler is not called. Because array points
3388 * to address of 'base' by default and is reallocated in addba
3389 * handler later, only free the memory when the array does not
3390 * point to base.
3391 */
3392 for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
3393 if (peer->tids_rx_reorder[i].array !=
3394 &peer->tids_rx_reorder[i].base) {
3395 ol_txrx_dbg(
3396 "%s, delete reorder arr, tid:%d\n",
3397 __func__, i);
3398 qdf_mem_free(peer->tids_rx_reorder[i].array);
3399 ol_rx_reorder_init(&peer->tids_rx_reorder[i],
3400 (uint8_t)i);
3401 }
3402 }
3403}
3404
3405/**
3406 * ol_txrx_peer_release_ref() - release peer reference
3407 * @peer: peer handle
 * @debug_id: debug id of the caller that is releasing the reference
3408 *
3409 * Release peer reference and delete peer if refcount is 0
3410 *
wadesong9f2b1102017-12-20 22:58:35 +08003411 * Return: Resulting peer ref_cnt after this function is invoked
Mohit Khannab7bec722017-11-10 11:43:44 -08003412 */
3413int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
3414 enum peer_debug_id_type debug_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003415{
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003416 int rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003417 struct ol_txrx_vdev_t *vdev;
3418 struct ol_txrx_pdev_t *pdev;
Jingxiang Ge3badb982018-01-02 17:39:01 +08003419 bool ref_silent = false;
Jingxiang Ge190679b2018-01-30 08:56:19 +08003420 int access_list = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003421
3422 /* preconditions */
3423 TXRX_ASSERT2(peer);
3424
3425 vdev = peer->vdev;
3426 if (NULL == vdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303427 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003428 "The vdev is not present anymore\n");
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003429 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003430 }
3431
3432 pdev = vdev->pdev;
3433 if (NULL == pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303434 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003435 "The pdev is not present anymore\n");
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003436 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003437 }
3438
Mohit Khannab7bec722017-11-10 11:43:44 -08003439 if (debug_id >= PEER_DEBUG_ID_MAX || debug_id < 0) {
3440 ol_txrx_err("incorrect debug_id %d ", debug_id);
3441 return -EINVAL;
3442 }
3443
Jingxiang Ge3badb982018-01-02 17:39:01 +08003444 if (debug_id == PEER_DEBUG_ID_OL_RX_THREAD)
3445 ref_silent = true;
3446
3447 if (!ref_silent)
3448 wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
3449 DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
3450 peer, 0,
3451 qdf_atomic_read(&peer->ref_cnt));
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003452
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003453
3454 /*
3455 * Hold the lock all the way from checking if the peer ref count
3456 * is zero until the peer references are removed from the hash
3457 * table and vdev list (if the peer ref count is zero).
3458 * This protects against a new HL tx operation starting to use the
3459 * peer object just after this function concludes it's done being used.
3460 * Furthermore, the lock needs to be held while checking whether the
3461 * vdev's list of peers is empty, to make sure that list is not modified
3462 * concurrently with the empty check.
3463 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303464 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003465
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003466 /*
3467 * Check for the reference count before deleting the peer
3468 * as we noticed that sometimes we are re-entering this
3469 * function again which is leading to dead-lock.
3470 * (A double-free should never happen, so assert if it does.)
3471 */
3472 rc = qdf_atomic_read(&(peer->ref_cnt));
3473
3474 if (rc == 0) {
3475 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3476 ol_txrx_err("The Peer is not present anymore\n");
3477 qdf_assert(0);
3478 return -EACCES;
3479 }
3480 /*
3481 * now decrement rc; this will be the return code.
3482 * 0 : peer deleted
3483 * >0: peer ref removed, but still has other references
3484 * <0: sanity failed - no changes to the state of the peer
3485 */
3486 rc--;
3487
Mohit Khannab7bec722017-11-10 11:43:44 -08003488 if (!qdf_atomic_read(&peer->access_list[debug_id])) {
3489 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3490		ol_txrx_err("peer %pK ref was not taken by %d",
3491 peer, debug_id);
3492 ol_txrx_dump_peer_access_list(peer);
3493 QDF_BUG(0);
3494 return -EACCES;
3495 }
Mohit Khannab7bec722017-11-10 11:43:44 -08003496 qdf_atomic_dec(&peer->access_list[debug_id]);
3497
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07003498 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
Mohit Khannab7bec722017-11-10 11:43:44 -08003499 u16 peer_id;
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003500 wlan_roam_debug_log(vdev->vdev_id,
3501 DEBUG_DELETING_PEER_OBJ,
3502 DEBUG_INVALID_PEER_ID,
3503 &peer->mac_addr.raw, peer, 0,
3504 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003505 peer_id = peer->local_id;
3506 /* remove the reference to the peer from the hash table */
3507 ol_txrx_peer_find_hash_remove(pdev, peer);
3508
3509 /* remove the peer from its parent vdev's list */
3510 TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
3511
3512 /* cleanup the Rx reorder queues for this peer */
3513 ol_rx_peer_cleanup(vdev, peer);
3514
Jingxiang Ge3badb982018-01-02 17:39:01 +08003515 qdf_spinlock_destroy(&peer->peer_info_lock);
3516 qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
3517
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003518 /* peer is removed from peer_list */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303519 qdf_atomic_set(&peer->delete_in_progress, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003520
3521 /*
3522 * Set wait_delete_comp event if the current peer id matches
3523 * with registered peer id.
3524 */
3525 if (peer_id == vdev->wait_on_peer_id) {
Anurag Chouhance0dc992016-02-16 18:18:03 +05303526 qdf_event_set(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003527 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3528 }
3529
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07003530 qdf_timer_sync_cancel(&peer->peer_unmap_timer);
3531 qdf_timer_free(&peer->peer_unmap_timer);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003532
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003533 /* check whether the parent vdev has no peers left */
3534 if (TAILQ_EMPTY(&vdev->peer_list)) {
3535 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003536 * Check if the parent vdev was waiting for its peers
3537 * to be deleted, in order for it to be deleted too.
3538 */
3539 if (vdev->delete.pending) {
3540 ol_txrx_vdev_delete_cb vdev_delete_cb =
3541 vdev->delete.callback;
3542 void *vdev_delete_context =
3543 vdev->delete.context;
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303544 /*
3545 * Now that there are no references to the peer,
3546 * we can release the peer reference lock.
3547 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303548 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303549
gbian016a42e2017-03-01 18:49:11 +08003550 /*
Yun Parkeaea8632017-04-09 09:53:45 -07003551				 * ol_tx_desc_free might access stale vdev
3552				 * contents referenced by a tx desc, since
3553				 * this vdev may be detached asynchronously
3554				 * by another thread.
3555				 *
3556				 * When detaching this vdev, walk the tx desc
3557				 * pool and set each desc's vdev to NULL, and
3558				 * check the vdev pointer in ol_tx_desc_free
3559				 * to avoid a crash.
3560 */
gbian016a42e2017-03-01 18:49:11 +08003561 ol_txrx_tx_desc_reset_vdev(vdev);
Poddar, Siddarth14521792017-03-14 21:19:42 +05303562 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003563 "%s: deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) - its last peer is done",
Yun Parkeaea8632017-04-09 09:53:45 -07003564 __func__, vdev,
3565 vdev->mac_addr.raw[0],
3566 vdev->mac_addr.raw[1],
3567 vdev->mac_addr.raw[2],
3568 vdev->mac_addr.raw[3],
3569 vdev->mac_addr.raw[4],
3570 vdev->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003571 /* all peers are gone, go ahead and delete it */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303572 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003573 if (vdev_delete_cb)
3574 vdev_delete_cb(vdev_delete_context);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303575 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303576 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003577 }
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303578 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303579 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303580 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003581
Mohit Khannab7bec722017-11-10 11:43:44 -08003582	ol_txrx_info_high("[%d][%d]: Deleting peer %pK ref_cnt -> %d %s",
3583 debug_id,
3584 qdf_atomic_read(&peer->access_list[debug_id]),
3585 peer, rc,
3586 qdf_atomic_read(&peer->fw_create_pending)
3587 == 1 ?
3588 "(No Maps received)" : "");
Mohit Khanna8ee37c62017-08-07 17:15:20 -07003589
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303590 ol_txrx_peer_tx_queue_free(pdev, peer);
3591
Deepak Dhamdhereb0d2dda2017-04-03 01:01:50 -07003592 /* Remove mappings from peer_id to peer object */
3593 ol_txrx_peer_clear_map_peer(pdev, peer);
3594
wadesong9f2b1102017-12-20 22:58:35 +08003595 /* Remove peer pointer from local peer ID map */
3596 ol_txrx_local_peer_id_free(pdev, peer);
3597
Mohit Khannab7bec722017-11-10 11:43:44 -08003598 ol_txrx_peer_free_tids(peer);
3599
3600 ol_txrx_dump_peer_access_list(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003601
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303602 qdf_mem_free(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003603 } else {
Jingxiang Ge190679b2018-01-30 08:56:19 +08003604 access_list = qdf_atomic_read(
3605 &peer->access_list[debug_id]);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303606 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Jingxiang Ge3badb982018-01-02 17:39:01 +08003607 if (!ref_silent)
3608			ol_txrx_info_high("[%d][%d]: ref delete peer %pK ref_cnt -> %d",
3609 debug_id,
Jingxiang Ge190679b2018-01-30 08:56:19 +08003610 access_list,
Jingxiang Ge3badb982018-01-02 17:39:01 +08003611 peer, rc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003612 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003613 return rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003614}
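
/*
 * Hedged example of interpreting the return value above (illustrative
 * only), matching the encoding documented inside the function:
 *
 *	rc = ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *	if (rc == 0)
 *		peer = NULL;	(the peer object has been freed)
 *	else if (rc > 0)
 *		...		(other references remain; peer still valid)
 */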
3615
Dhanashri Atre12a08392016-02-17 13:10:34 -08003616/**
Mohit Khanna0696eef2016-04-14 16:14:08 -07003617 * ol_txrx_clear_peer_internal() - ol internal function to clear peer
3618 * @peer: pointer to ol txrx peer structure
3619 *
3620 * Return: QDF Status
3621 */
3622static QDF_STATUS
3623ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
3624{
3625 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3626 /* Drop pending Rx frames in CDS */
3627 if (sched_ctx)
3628 cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3629
3630 /* Purge the cached rx frame queue */
3631 ol_txrx_flush_rx_frames(peer, 1);
3632
3633 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003634 peer->state = OL_TXRX_PEER_STATE_DISC;
3635 qdf_spin_unlock_bh(&peer->peer_info_lock);
3636
3637 return QDF_STATUS_SUCCESS;
3638}
3639
3640/**
3641 * ol_txrx_clear_peer() - clear peer
3642 * @sta_id: sta id
3643 *
3644 * Return: QDF Status
3645 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003646static QDF_STATUS ol_txrx_clear_peer(struct cdp_pdev *ppdev, uint8_t sta_id)
Mohit Khanna0696eef2016-04-14 16:14:08 -07003647{
3648 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003649 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003650
3651 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303652 ol_txrx_err("%s: Unable to find pdev!",
Mohit Khanna0696eef2016-04-14 16:14:08 -07003653 __func__);
3654 return QDF_STATUS_E_FAILURE;
3655 }
3656
3657 if (sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303658 ol_txrx_err("Invalid sta id %d", sta_id);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003659 return QDF_STATUS_E_INVAL;
3660 }
3661
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003662 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Kabilan Kannanfa163982018-01-30 12:03:41 -08003663
3664	/* Return success if the peer has already been cleared by
3665	 * the data path via the peer detach function.
3666 */
Mohit Khanna0696eef2016-04-14 16:14:08 -07003667 if (!peer)
Kabilan Kannanfa163982018-01-30 12:03:41 -08003668 return QDF_STATUS_SUCCESS;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003669
3670 return ol_txrx_clear_peer_internal(peer);
3671
3672}
3673
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003674void peer_unmap_timer_work_function(void *param)
3675{
3676 WMA_LOGE("Enter: %s", __func__);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003677 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08003678 ol_txrx_dump_peer_access_list(param);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003679 wlan_roam_debug_dump_table();
Anurag Chouhan4085ff72017-10-05 18:09:56 +05303680 cds_trigger_recovery(QDF_PEER_UNMAP_TIMEDOUT);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003681}
3682
Mohit Khanna0696eef2016-04-14 16:14:08 -07003683/**
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003684 * peer_unmap_timer_handler() - peer unmap timer function
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003685 * @data: peer object pointer
3686 *
3687 * Return: none
3688 */
3689void peer_unmap_timer_handler(void *data)
3690{
3691 ol_txrx_peer_handle peer = (ol_txrx_peer_handle)data;
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003692 ol_txrx_pdev_handle txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003693
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003694 ol_txrx_err("all unmap events not received for peer %pK, ref_cnt %d",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003695 peer, qdf_atomic_read(&peer->ref_cnt));
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003696 ol_txrx_err("peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003697 peer,
3698 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3699 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3700 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Nachiket Kukadea48fd772017-07-28 18:48:57 +05303701 if (!cds_is_driver_recovering() && !cds_is_fw_down()) {
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003702 qdf_create_work(0, &txrx_pdev->peer_unmap_timer_work,
3703 peer_unmap_timer_work_function,
Naveen Rawat17c42a82018-02-01 19:18:27 -08003704 peer);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003705 qdf_sched_work(0, &txrx_pdev->peer_unmap_timer_work);
Deepak Dhamdhered42ab7c2017-04-13 19:32:16 -07003706 } else {
3707 ol_txrx_err("Recovery is in progress, ignore!");
3708 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003709}
3710
3711
3712/**
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003713 * ol_txrx_peer_detach() - Delete a peer's data object.
3714 * @peer - the object to detach
Naveen Rawatf4ada152017-09-05 14:56:12 -07003715 * @bitmap - bitmap indicating special handling of request.
Dhanashri Atre12a08392016-02-17 13:10:34 -08003716 *
3717 * When the host's control SW disassociates a peer, it calls
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003718 * this function to detach and delete the peer. The reference
Dhanashri Atre12a08392016-02-17 13:10:34 -08003719 * stored in the control peer object to the data peer
3720 * object (set up by a call to ol_peer_store()) is provided.
3721 *
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003722 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08003723 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003724static void ol_txrx_peer_detach(void *ppeer, uint32_t bitmap)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003725{
Leo Chang98726762016-10-28 11:07:18 -07003726 ol_txrx_peer_handle peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003727 struct ol_txrx_vdev_t *vdev = peer->vdev;
3728
3729 /* redirect peer's rx delivery function to point to a discard func */
3730 peer->rx_opt_proc = ol_rx_discard;
3731
3732 peer->valid = 0;
3733
Mohit Khanna0696eef2016-04-14 16:14:08 -07003734 /* flush all rx packets before clearing up the peer local_id */
3735 ol_txrx_clear_peer_internal(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003736
3737 /* debug print to dump rx reorder state */
3738 /* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
3739
Poddar, Siddarth14521792017-03-14 21:19:42 +05303740 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003741 "%s:peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003742 __func__, peer,
3743 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3744 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3745 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003746
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303747 qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003748 if (vdev->last_real_peer == peer)
3749 vdev->last_real_peer = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303750 qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003751 htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
3752
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003753 /*
3754 * set delete_in_progress to identify that WMA
3755 * is waiting for the unmap message for this peer
3756 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303757 qdf_atomic_set(&peer->delete_in_progress, 1);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003758
Lin Bai973e6922018-01-08 17:59:19 +08003759 if (!(bitmap & (1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER))) {
Naveen Rawatf4ada152017-09-05 14:56:12 -07003760 if (vdev->opmode == wlan_op_mode_sta) {
3761 qdf_mem_copy(&peer->vdev->last_peer_mac_addr,
3762 &peer->mac_addr,
3763 sizeof(union ol_txrx_align_mac_addr_t));
Abhishek Singh217d9782017-04-28 23:49:11 +05303764
Lin Bai973e6922018-01-08 17:59:19 +08003765 /*
3766 * Start a timer to track unmap events when the
3767 * sta peer gets deleted.
3768 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003769 qdf_timer_start(&peer->peer_unmap_timer,
3770 OL_TXRX_PEER_UNMAP_TIMEOUT);
Mohit Khannab7bec722017-11-10 11:43:44 -08003771 ol_txrx_info_high
3772 ("started peer_unmap_timer for peer %pK",
3773 peer);
Naveen Rawatf4ada152017-09-05 14:56:12 -07003774 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003775 }
3776
3777 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003778 * Remove the reference added during peer_attach.
3779 * The peer will still be left allocated until the
3780 * PEER_UNMAP message arrives to remove the other
3781 * reference, added by the PEER_MAP message.
3782 */
Mohit Khannab7bec722017-11-10 11:43:44 -08003783 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003784}
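
/*
 * Illustrative usage sketch (hypothetical caller, not part of the driver):
 * the bitmap selects special handling. CDP_PEER_DELETE_NO_SPECIAL requests
 * the default behaviour, while setting the CDP_PEER_DO_NOT_START_UNMAP_TIMER
 * bit skips starting the STA-peer unmap timer above.
 *
 *	ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
 *	ol_txrx_peer_detach(peer, 1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER);
 */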
3785
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003786/**
3787 * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
Lin Bai973e6922018-01-08 17:59:19 +08003788 * @ppeer - the object to detach
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003789 *
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003790 * Detach a peer and force the peer object to be removed. It is called during
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003791 * a roaming scenario when the firmware has already deleted the peer.
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003792 * Remove it from the peer_id_to_object map. The peer object is actually freed
3793 * when the last reference is released.
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003794 *
3795 * Return: None
3796 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003797static void ol_txrx_peer_detach_force_delete(void *ppeer)
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003798{
Leo Chang98726762016-10-28 11:07:18 -07003799 ol_txrx_peer_handle peer = ppeer;
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003800 ol_txrx_pdev_handle pdev = peer->vdev->pdev;
3801
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003802 ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d",
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003803 __func__, peer, qdf_atomic_read(&peer->ref_cnt));
3804
3805 /* Clear the peer_id_to_obj map entries */
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003806 ol_txrx_peer_remove_obj_map_entries(pdev, peer);
Lin Bai973e6922018-01-08 17:59:19 +08003807 ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003808}
3809
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003810/**
3811 * ol_txrx_dump_tx_desc() - dump tx desc total and free count
3812 * @pdev_handle: Pointer to txrx pdev
3813 *
3814 * Return: none
3815 */
3816static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3817{
3818 struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003819 uint32_t total, num_free;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003820
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303821 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3822 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3823 else
3824 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003825
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003826 num_free = ol_tx_get_total_free_desc(pdev);
3827
Kapil Gupta53d9b572017-06-28 17:53:25 +05303828 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303829 "total tx credit %d num_free %d",
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003830 total, num_free);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003831
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003832}
3833
3834/**
3835 * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3836 * @timeout: timeout in ms
3837 *
3838 * Wait for the tx queue to empty; return a timeout error if the
3839 * queue does not empty before the timeout expires.
3840 *
3841 * Return:
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303842 * QDF_STATUS_SUCCESS if the queue empties,
3843 * QDF_STATUS_E_TIMEOUT in case of timeout,
3844 * QDF_STATUS_E_FAULT in case of missing handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003845 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003846static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003847{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003848 struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003849
3850 if (txrx_pdev == NULL) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303851 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003852 "%s: txrx context is null", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303853 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003854 }
3855
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003856 while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
Anurag Chouhan512c7d52016-02-19 15:49:46 +05303857 qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003858 if (timeout <= 0) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303859 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303860 "%s: tx frames are pending", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003861 ol_txrx_dump_tx_desc(txrx_pdev);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303862 return QDF_STATUS_E_TIMEOUT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003863 }
3864 timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3865 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303866 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003867}
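
/*
 * Illustrative usage sketch (hypothetical caller, not part of the driver):
 * drain outstanding tx frames before tearing down the data path, treating
 * a timeout as a soft failure.
 *
 *	if (ol_txrx_wait_for_pending_tx(1000) != QDF_STATUS_SUCCESS)
 *		ol_txrx_err("tx drain timed out, continuing with teardown");
 */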
3868
3869#ifndef QCA_WIFI_3_0_EMU
Himanshu Agarwal83a87572017-05-25 14:09:50 +05303870#define SUSPEND_DRAIN_WAIT 500
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003871#else
3872#define SUSPEND_DRAIN_WAIT 3000
3873#endif
3874
Yue Ma1e11d792016-02-26 18:58:44 -08003875#ifdef FEATURE_RUNTIME_PM
3876/**
3877 * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
3878 * @txrx_pdev: TXRX pdev context
3879 *
3880 * TXRX is ready to runtime suspend if there are no pending packets
3881 * in the tx queue.
3882 *
3883 * Return: QDF_STATUS
3884 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003885static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003886{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003887 struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07003888
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003889 if (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev))
Yue Ma1e11d792016-02-26 18:58:44 -08003890 return QDF_STATUS_E_BUSY;
3891 else
3892 return QDF_STATUS_SUCCESS;
3893}
3894
3895/**
3896 * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
3897 * @txrx_pdev: TXRX pdev context
3898 *
3899 * This is a dummy function for symmetry.
3900 *
3901 * Return: QDF_STATUS_SUCCESS
3902 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003903static QDF_STATUS ol_txrx_runtime_resume(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003904{
3905 return QDF_STATUS_SUCCESS;
3906}
3907#endif
3908
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003909/**
3910 * ol_txrx_bus_suspend() - bus suspend
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003911 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003912 *
3913 * Ensure that ol_txrx is ready for bus suspend
3914 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303915 * Return: QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003916 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003917static QDF_STATUS ol_txrx_bus_suspend(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003918{
3919 return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3920}
3921
3922/**
3923 * ol_txrx_bus_resume() - bus resume
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003924 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003925 *
3926 * Dummy function for symmetry
3927 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303928 * Return: QDF_STATUS_SUCCESS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003929 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003930static QDF_STATUS ol_txrx_bus_resume(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003931{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303932 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003933}
3934
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003935/**
3936 * ol_txrx_get_tx_pending - Get the number of pending transmit
3937 * frames that are awaiting completion.
3938 *
3939 * @ppdev - the data physical device object
3940 * Mainly used in the cleanup path to make sure all buffers have been freed
3941 *
3942 * Return: count of pending frames
3943 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003944int ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003945{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003946 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003947 uint32_t total;
3948
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303949 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3950 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3951 else
3952 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003953
Nirav Shah55b45a02016-01-21 10:00:16 +05303954 return total - ol_tx_get_total_free_desc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003955}
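
/*
 * Illustrative usage sketch (hypothetical cleanup path, not part of the
 * driver): poll until every tx descriptor has been returned.
 *
 *	while (ol_txrx_get_tx_pending((struct cdp_pdev *)pdev))
 *		qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
 */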
3956
3957void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
3958{
3959 ol_tx_desc_list tx_descs;
Yun Parkeaea8632017-04-09 09:53:45 -07003960 /*
3961 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303962 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Yun Parkeaea8632017-04-09 09:53:45 -07003963 * which is the same as the normal data send completion path
3964 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003965 htt_tx_pending_discard(pdev_handle->htt_pdev);
3966
3967 TAILQ_INIT(&tx_descs);
3968 ol_tx_queue_discard(pdev_handle, true, &tx_descs);
3969 /* Discard Frames in Discard List */
3970 ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
3971
3972 ol_tx_discard_target_frms(pdev_handle);
3973}
3974
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003975static inline
3976uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
3977{
3978 return (uint64_t) ((size_t) req);
3979}
3980
3981static inline
3982struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
3983{
3984 return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
3985}
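
/*
 * Illustrative sketch (not part of the driver): the cookie handed to the
 * target is simply the request pointer packed into a u64, so the original
 * request can be recovered in the stats-confirmation handler.
 *
 *	struct ol_txrx_stats_req_internal *req;
 *	uint64_t cookie;
 *
 *	req = qdf_mem_malloc(sizeof(*req));
 *	cookie = ol_txrx_stats_ptr_to_u64(req);
 *	...
 *	req = ol_txrx_u64_to_stats_ptr(cookie);   (same pointer back)
 */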
3986
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003987#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003988void
3989ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
3990 uint8_t cfg_stats_type, uint32_t cfg_val)
3991{
3992 uint64_t dummy_cookie = 0;
Yun Parkeaea8632017-04-09 09:53:45 -07003993
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003994 htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
3995 0 /* reset mask */,
3996 cfg_stats_type, cfg_val, dummy_cookie);
3997}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003998#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003999
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004000static A_STATUS
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004001ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
Dhanashri Atre52f71332016-08-22 12:12:36 -07004002 bool per_vdev, bool response_expected)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004003{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004004 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004005 struct ol_txrx_pdev_t *pdev = vdev->pdev;
4006 uint64_t cookie;
4007 struct ol_txrx_stats_req_internal *non_volatile_req;
4008
4009 if (!pdev ||
4010 req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
4011 req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
4012 return A_ERROR;
4013 }
4014
4015 /*
4016 * Allocate a non-transient stats request object.
4017 * (The one provided as an argument is likely allocated on the stack.)
4018 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304019 non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004020 if (!non_volatile_req)
4021 return A_NO_MEMORY;
4022
4023 /* copy the caller's specifications */
4024 non_volatile_req->base = *req;
4025 non_volatile_req->serviced = 0;
4026 non_volatile_req->offset = 0;
4027
4028 /* use the non-volatile request object's address as the cookie */
4029 cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
4030
tfyu9fcabd72017-09-26 17:46:48 +08004031 if (response_expected) {
4032 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4033 TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
4034 pdev->req_list_depth++;
4035 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4036 }
4037
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004038 if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
4039 req->stats_type_upload_mask,
4040 req->stats_type_reset_mask,
4041 HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
4042 cookie)) {
tfyu9fcabd72017-09-26 17:46:48 +08004043 if (response_expected) {
4044 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4045 TAILQ_REMOVE(&pdev->req_list, non_volatile_req, req_list_elem);
4046 pdev->req_list_depth--;
4047 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4048 }
4049
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304050 qdf_mem_free(non_volatile_req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004051 return A_ERROR;
4052 }
4053
Nirav Shahd2310422016-01-21 18:58:06 +05304054 if (response_expected == false)
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304055 qdf_mem_free(non_volatile_req);
Nirav Shahd2310422016-01-21 18:58:06 +05304056
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004057 return A_OK;
4058}
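
/*
 * Illustrative usage sketch (hypothetical caller, not part of the driver):
 * request a single HTT stats type from firmware. The field names below are
 * the ones read back by ol_txrx_fw_stats_handler(); pvdev is a placeholder
 * vdev handle.
 *
 *	struct ol_txrx_stats_req req = {0};
 *
 *	req.stats_type_upload_mask = 1 << HTT_DBG_STATS_RX_REORDER;
 *	req.print.verbose = 1;
 *	req.copy.buf = NULL;   (print only, no copy-out)
 *	if (ol_txrx_fw_stats_get(pvdev, &req, true, false) != A_OK)
 *		ol_txrx_err("fw stats request failed");
 */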
Dhanashri Atre12a08392016-02-17 13:10:34 -08004059
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004060void
4061ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
4062 uint64_t cookie, uint8_t *stats_info_list)
4063{
4064 enum htt_dbg_stats_type type;
4065 enum htt_dbg_stats_status status;
4066 int length;
4067 uint8_t *stats_data;
tfyu9fcabd72017-09-26 17:46:48 +08004068 struct ol_txrx_stats_req_internal *req, *tmp;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004069 int more = 0;
tfyu9fcabd72017-09-26 17:46:48 +08004070 int found = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004071
4072 req = ol_txrx_u64_to_stats_ptr(cookie);
4073
tfyu9fcabd72017-09-26 17:46:48 +08004074 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4075 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4076 if (req == tmp) {
4077 found = 1;
4078 break;
4079 }
4080 }
4081 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4082
4083 if (!found) {
4084 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05304085 "req(%pK) from firmware can't be found in the list\n", req);
tfyu9fcabd72017-09-26 17:46:48 +08004086 return;
4087 }
4088
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004089 do {
4090 htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
4091 &length, &stats_data);
4092 if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
4093 break;
4094 if (status == HTT_DBG_STATS_STATUS_PRESENT ||
4095 status == HTT_DBG_STATS_STATUS_PARTIAL) {
4096 uint8_t *buf;
4097 int bytes = 0;
4098
4099 if (status == HTT_DBG_STATS_STATUS_PARTIAL)
4100 more = 1;
4101 if (req->base.print.verbose || req->base.print.concise)
4102 /* provide the header along with the data */
4103 htt_t2h_stats_print(stats_info_list,
4104 req->base.print.concise);
4105
4106 switch (type) {
4107 case HTT_DBG_STATS_WAL_PDEV_TXRX:
4108 bytes = sizeof(struct wlan_dbg_stats);
4109 if (req->base.copy.buf) {
4110 int lmt;
4111
4112 lmt = sizeof(struct wlan_dbg_stats);
4113 if (req->base.copy.byte_limit < lmt)
4114 lmt = req->base.copy.byte_limit;
4115 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304116 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004117 }
4118 break;
4119 case HTT_DBG_STATS_RX_REORDER:
4120 bytes = sizeof(struct rx_reorder_stats);
4121 if (req->base.copy.buf) {
4122 int lmt;
4123
4124 lmt = sizeof(struct rx_reorder_stats);
4125 if (req->base.copy.byte_limit < lmt)
4126 lmt = req->base.copy.byte_limit;
4127 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304128 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004129 }
4130 break;
4131 case HTT_DBG_STATS_RX_RATE_INFO:
4132 bytes = sizeof(wlan_dbg_rx_rate_info_t);
4133 if (req->base.copy.buf) {
4134 int lmt;
4135
4136 lmt = sizeof(wlan_dbg_rx_rate_info_t);
4137 if (req->base.copy.byte_limit < lmt)
4138 lmt = req->base.copy.byte_limit;
4139 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304140 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004141 }
4142 break;
4143
4144 case HTT_DBG_STATS_TX_RATE_INFO:
4145 bytes = sizeof(wlan_dbg_tx_rate_info_t);
4146 if (req->base.copy.buf) {
4147 int lmt;
4148
4149 lmt = sizeof(wlan_dbg_tx_rate_info_t);
4150 if (req->base.copy.byte_limit < lmt)
4151 lmt = req->base.copy.byte_limit;
4152 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304153 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004154 }
4155 break;
4156
4157 case HTT_DBG_STATS_TX_PPDU_LOG:
4158 bytes = 0;
4159 /* TO DO: specify how many bytes are present */
4160 /* TO DO: add copying to the requestor's buf */
4161
4162 case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07004163 bytes = sizeof(struct
4164 rx_remote_buffer_mgmt_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004165 if (req->base.copy.buf) {
4166 int limit;
4167
Yun Parkeaea8632017-04-09 09:53:45 -07004168 limit = sizeof(struct
4169 rx_remote_buffer_mgmt_stats);
4170 if (req->base.copy.byte_limit < limit)
4171 limit = req->base.copy.
4172 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004173 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304174 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004175 }
4176 break;
4177
4178 case HTT_DBG_STATS_TXBF_INFO:
4179 bytes = sizeof(struct wlan_dbg_txbf_data_stats);
4180 if (req->base.copy.buf) {
4181 int limit;
4182
Yun Parkeaea8632017-04-09 09:53:45 -07004183 limit = sizeof(struct
4184 wlan_dbg_txbf_data_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004185 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004186 limit = req->base.copy.
4187 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004188 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304189 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004190 }
4191 break;
4192
4193 case HTT_DBG_STATS_SND_INFO:
4194 bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
4195 if (req->base.copy.buf) {
4196 int limit;
4197
Yun Parkeaea8632017-04-09 09:53:45 -07004198 limit = sizeof(struct
4199 wlan_dbg_txbf_snd_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004200 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004201 limit = req->base.copy.
4202 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004203 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304204 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004205 }
4206 break;
4207
4208 case HTT_DBG_STATS_TX_SELFGEN_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07004209 bytes = sizeof(struct
4210 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004211 if (req->base.copy.buf) {
4212 int limit;
4213
Yun Parkeaea8632017-04-09 09:53:45 -07004214 limit = sizeof(struct
4215 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004216 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004217 limit = req->base.copy.
4218 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004219 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304220 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004221 }
4222 break;
4223
4224 case HTT_DBG_STATS_ERROR_INFO:
4225 bytes =
4226 sizeof(struct wlan_dbg_wifi2_error_stats);
4227 if (req->base.copy.buf) {
4228 int limit;
4229
Yun Parkeaea8632017-04-09 09:53:45 -07004230 limit = sizeof(struct
4231 wlan_dbg_wifi2_error_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004232 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004233 limit = req->base.copy.
4234 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004235 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304236 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004237 }
4238 break;
4239
4240 case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
4241 bytes =
4242 sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
4243 if (req->base.copy.buf) {
4244 int limit;
4245
4246 limit = sizeof(struct
4247 rx_txbf_musu_ndpa_pkts_stats);
4248 if (req->base.copy.byte_limit < limit)
4249 limit =
4250 req->base.copy.byte_limit;
4251 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304252 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004253 }
4254 break;
4255
4256 default:
4257 break;
4258 }
Yun Parkeaea8632017-04-09 09:53:45 -07004259 buf = req->base.copy.buf ?
4260 req->base.copy.buf : stats_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004261 if (req->base.callback.fp)
4262 req->base.callback.fp(req->base.callback.ctxt,
4263 type, buf, bytes);
4264 }
4265 stats_info_list += length;
4266 } while (1);
4267
4268 if (!more) {
tfyu9fcabd72017-09-26 17:46:48 +08004269 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4270 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4271 if (req == tmp) {
4272 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
4273 pdev->req_list_depth--;
4274 qdf_mem_free(req);
4275 break;
4276 }
4277 }
4278 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004279 }
4280}
4281
4282#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
4283int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
4284{
4285 if (debug_specs & TXRX_DBG_MASK_OBJS) {
4286#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4287 ol_txrx_pdev_display(vdev->pdev, 0);
4288#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304289 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304290 "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004291#endif
4292 }
Yun Parkeaea8632017-04-09 09:53:45 -07004293 if (debug_specs & TXRX_DBG_MASK_STATS)
Mohit Khannaca4173b2017-09-12 21:52:19 -07004294 ol_txrx_stats_display(vdev->pdev,
4295 QDF_STATS_VERBOSITY_LEVEL_HIGH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004296 if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
4297#if defined(ENABLE_TXRX_PROT_ANALYZE)
4298 ol_txrx_prot_ans_display(vdev->pdev);
4299#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304300 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304301 "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004302#endif
4303 }
4304 if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
4305#if defined(ENABLE_RX_REORDER_TRACE)
4306 ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
4307#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304308 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304309 "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004310#endif
4311
4312 }
4313 return 0;
4314}
4315#endif
4316
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004317#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004318int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
4319 int max_subfrms_ampdu, int max_subfrms_amsdu)
4320{
4321 return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
4322 max_subfrms_ampdu, max_subfrms_amsdu);
4323}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004324#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004325
4326#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4327void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
4328{
4329 struct ol_txrx_vdev_t *vdev;
4330
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304331 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004332 "%*s%s:\n", indent, " ", "txrx pdev");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304333 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004334 "%*spdev object: %pK", indent + 4, " ", pdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304335 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004336 "%*svdev list:", indent + 4, " ");
4337 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304338 ol_txrx_vdev_display(vdev, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004339 }
4340 ol_txrx_peer_find_display(pdev, indent + 4);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304341 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004342 "%*stx desc pool: %d elems @ %pK", indent + 4, " ",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004343 pdev->tx_desc.pool_size, pdev->tx_desc.array);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304344 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004345 htt_display(pdev->htt_pdev, indent);
4346}
4347
4348void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
4349{
4350 struct ol_txrx_peer_t *peer;
4351
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304352 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004353 "%*stxrx vdev: %pK\n", indent, " ", vdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304354 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004355 "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304356 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004357 "%*sMAC addr: %d:%d:%d:%d:%d:%d",
4358 indent + 4, " ",
4359 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
4360 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
4361 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304362 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004363 "%*speer list:", indent + 4, " ");
4364 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304365 ol_txrx_peer_display(peer, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004366 }
4367}
4368
4369void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
4370{
4371 int i;
4372
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304373 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004374 "%*stxrx peer: %pK", indent, " ", peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004375 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
4376 if (peer->peer_ids[i] != HTT_INVALID_PEER) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304377 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004378 "%*sID: %d", indent + 4, " ",
4379 peer->peer_ids[i]);
4380 }
4381 }
4382}
4383#endif /* TXRX_DEBUG_LEVEL */
4384
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004385/**
4386 * ol_txrx_stats() - write ol layer stats into the caller's buffer
4387 * @vdev_id: vdev_id
4388 * @buffer: pointer to buffer
4389 * @buf_len: length of the buffer
4390 *
4391 * Return: length of string
4392 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004393static int
Yun Parkeaea8632017-04-09 09:53:45 -07004394ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004395{
4396 uint32_t len = 0;
4397
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004398 struct ol_txrx_vdev_t *vdev =
4399 (struct ol_txrx_vdev_t *)
4400 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004401
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004402 if (!vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304403 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304404 "%s: vdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004405 snprintf(buffer, buf_len, "vdev not found");
4406 return len;
4407 }
4408
4409 len = scnprintf(buffer, buf_len,
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004410 "\n\nTXRX stats:\nllQueue State : %s\npause %u unpause %u\noverflow %u\nllQueue timer state : %s",
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304411 ((vdev->ll_pause.is_q_paused == false) ?
4412 "UNPAUSED" : "PAUSED"),
4413 vdev->ll_pause.q_pause_cnt,
4414 vdev->ll_pause.q_unpause_cnt,
4415 vdev->ll_pause.q_overflow_cnt,
4416 ((vdev->ll_pause.is_q_timer_on == false)
4417 ? "NOT-RUNNING" : "RUNNING"));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004418 return len;
4419}
4420
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004421#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
4422/**
4423 * ol_txrx_disp_peer_cached_bufq_stats() - display peer cached_bufq stats
4424 * @peer: peer pointer
4425 *
4426 * Return: None
4427 */
4428static void ol_txrx_disp_peer_cached_bufq_stats(struct ol_txrx_peer_t *peer)
4429{
Mohit Khannaca4173b2017-09-12 21:52:19 -07004430 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4431 "cached_bufq: curr %d drops %d hwm %d whatifs %d thresh %d",
4432 peer->bufq_info.curr,
4433 peer->bufq_info.dropped,
4434 peer->bufq_info.high_water_mark,
4435 peer->bufq_info.qdepth_no_thresh,
4436 peer->bufq_info.thresh);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004437}
4438
4439/**
4440 * ol_txrx_disp_peer_stats() - display peer stats
4441 * @pdev: pdev pointer
4442 *
4443 * Return: None
4444 */
4445static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4446{ int i;
4447 struct ol_txrx_peer_t *peer;
4448 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
4449
4450 if (osc && hif_is_load_or_unload_in_progress(HIF_GET_SOFTC(osc)))
4451 return;
4452
4453 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
4454 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4455 peer = pdev->local_peer_ids.map[i];
Frank Liu4362e462018-01-16 11:51:55 +08004456 if (peer) {
4457 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khannab7bec722017-11-10 11:43:44 -08004458 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Frank Liu4362e462018-01-16 11:51:55 +08004459 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
4460 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004461 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4462
4463 if (peer) {
4464 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004465 "stats: peer 0x%pK local peer id %d", peer, i);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004466 ol_txrx_disp_peer_cached_bufq_stats(peer);
Mohit Khannab7bec722017-11-10 11:43:44 -08004467 ol_txrx_peer_release_ref(peer,
4468 PEER_DEBUG_ID_OL_INTERNAL);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004469 }
4470 }
4471}
4472#else
4473static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4474{
4475 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Mohit Khannaca4173b2017-09-12 21:52:19 -07004476 "peer stats not supported w/o QCA_SUPPORT_TXRX_LOCAL_PEER_ID");
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004477}
4478#endif
4479
Mohit Khannaca4173b2017-09-12 21:52:19 -07004480void ol_txrx_stats_display(ol_txrx_pdev_handle pdev,
4481 enum qdf_stats_verbosity_level level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004482{
Mohit Khannaca4173b2017-09-12 21:52:19 -07004483 u64 tx_dropped =
4484 pdev->stats.pub.tx.dropped.download_fail.pkts
4485 + pdev->stats.pub.tx.dropped.target_discard.pkts
4486 + pdev->stats.pub.tx.dropped.no_ack.pkts
4487 + pdev->stats.pub.tx.dropped.others.pkts;
4488
4489 if (level == QDF_STATS_VERBOSITY_LEVEL_LOW) {
4490 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4491 "STATS |%u %u|TX: %lld tso %lld ok %lld drops(%u-%lld %u-%lld %u-%lld ?-%lld hR-%lld)|RX: %lld drops(E %lld PI %lld ME %lld) fwd(S %d F %d SF %d)|",
4492 pdev->tx_desc.num_free,
4493 pdev->tx_desc.pool_size,
4494 pdev->stats.pub.tx.from_stack.pkts,
4495 pdev->stats.pub.tx.tso.tso_pkts.pkts,
4496 pdev->stats.pub.tx.delivered.pkts,
4497 htt_tx_status_download_fail,
4498 pdev->stats.pub.tx.dropped.download_fail.pkts,
4499 htt_tx_status_discard,
4500 pdev->stats.pub.tx.dropped.target_discard.pkts,
4501 htt_tx_status_no_ack,
4502 pdev->stats.pub.tx.dropped.no_ack.pkts,
4503 pdev->stats.pub.tx.dropped.others.pkts,
4504 pdev->stats.pub.tx.dropped.host_reject.pkts,
4505 pdev->stats.pub.rx.delivered.pkts,
4506 pdev->stats.pub.rx.dropped_err.pkts,
4507 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4508 pdev->stats.pub.rx.dropped_mic_err.pkts,
4509 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4510 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4511 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
4512 return;
4513 }
4514
4515 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304516 "TX PATH Statistics:");
Mohit Khannaca4173b2017-09-12 21:52:19 -07004517 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shahda008342016-05-17 18:50:40 +05304518 "sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
4519 pdev->stats.pub.tx.from_stack.pkts,
4520 pdev->stats.pub.tx.from_stack.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004521 pdev->stats.pub.tx.dropped.host_reject.pkts,
4522 pdev->stats.pub.tx.dropped.host_reject.bytes,
Mohit Khannaca4173b2017-09-12 21:52:19 -07004523 tx_dropped,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004524 pdev->stats.pub.tx.dropped.download_fail.bytes
4525 + pdev->stats.pub.tx.dropped.target_discard.bytes
4526 + pdev->stats.pub.tx.dropped.no_ack.bytes);
Mohit Khannaca4173b2017-09-12 21:52:19 -07004527 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4528 "successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B) others: %lld (%lld B)",
Nirav Shahda008342016-05-17 18:50:40 +05304529 pdev->stats.pub.tx.delivered.pkts,
4530 pdev->stats.pub.tx.delivered.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004531 pdev->stats.pub.tx.dropped.download_fail.pkts,
4532 pdev->stats.pub.tx.dropped.download_fail.bytes,
4533 pdev->stats.pub.tx.dropped.target_discard.pkts,
4534 pdev->stats.pub.tx.dropped.target_discard.bytes,
4535 pdev->stats.pub.tx.dropped.no_ack.pkts,
Mohit Khannaca4173b2017-09-12 21:52:19 -07004536 pdev->stats.pub.tx.dropped.no_ack.bytes,
4537 pdev->stats.pub.tx.dropped.others.pkts,
4538 pdev->stats.pub.tx.dropped.others.bytes);
4539 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shahda008342016-05-17 18:50:40 +05304540 "Tx completions per HTT message:\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004541 "Single Packet %d\n"
4542 " 2-10 Packets %d\n"
4543 "11-20 Packets %d\n"
4544 "21-30 Packets %d\n"
4545 "31-40 Packets %d\n"
4546 "41-50 Packets %d\n"
4547 "51-60 Packets %d\n"
4548 " 60+ Packets %d\n",
4549 pdev->stats.pub.tx.comp_histogram.pkts_1,
4550 pdev->stats.pub.tx.comp_histogram.pkts_2_10,
4551 pdev->stats.pub.tx.comp_histogram.pkts_11_20,
4552 pdev->stats.pub.tx.comp_histogram.pkts_21_30,
4553 pdev->stats.pub.tx.comp_histogram.pkts_31_40,
4554 pdev->stats.pub.tx.comp_histogram.pkts_41_50,
4555 pdev->stats.pub.tx.comp_histogram.pkts_51_60,
4556 pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
Nirav Shahda008342016-05-17 18:50:40 +05304557
Mohit Khannaca4173b2017-09-12 21:52:19 -07004558 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304559 "RX PATH Statistics:");
Mohit Khannaca4173b2017-09-12 21:52:19 -07004560 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304561 "%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
Nirav Shahda008342016-05-17 18:50:40 +05304562 "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
4563 "msdus with frag_ind: %d msdus with offload_ind: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004564 pdev->stats.priv.rx.normal.ppdus,
4565 pdev->stats.priv.rx.normal.mpdus,
4566 pdev->stats.pub.rx.delivered.pkts,
4567 pdev->stats.pub.rx.delivered.bytes,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304568 pdev->stats.pub.rx.dropped_err.pkts,
4569 pdev->stats.pub.rx.dropped_err.bytes,
4570 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4571 pdev->stats.pub.rx.dropped_peer_invalid.bytes,
4572 pdev->stats.pub.rx.dropped_mic_err.pkts,
Nirav Shahda008342016-05-17 18:50:40 +05304573 pdev->stats.pub.rx.dropped_mic_err.bytes,
4574 pdev->stats.pub.rx.msdus_with_frag_ind,
4575 pdev->stats.pub.rx.msdus_with_offload_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004576
Mohit Khannaca4173b2017-09-12 21:52:19 -07004577 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004578 " fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
4579 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4580 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4581 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304582
Mohit Khannaca4173b2017-09-12 21:52:19 -07004583 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Nirav Shahda008342016-05-17 18:50:40 +05304584 "Rx packets per HTT message:\n"
Nirav Shah6a4eee62016-04-25 10:15:04 +05304585 "Single Packet %d\n"
4586 " 2-10 Packets %d\n"
4587 "11-20 Packets %d\n"
4588 "21-30 Packets %d\n"
4589 "31-40 Packets %d\n"
4590 "41-50 Packets %d\n"
4591 "51-60 Packets %d\n"
4592 " 60+ Packets %d\n",
4593 pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4594 pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4595 pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4596 pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4597 pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4598 pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4599 pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4600 pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004601
4602 ol_txrx_disp_peer_stats(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004603}
4604
4605void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4606{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304607 qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004608}
4609
4610#if defined(ENABLE_TXRX_PROT_ANALYZE)
4611
4612void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4613{
4614 ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4615 ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4616}
4617
4618#endif /* ENABLE_TXRX_PROT_ANALYZE */
4619
4620#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
4621int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4622{
4623 return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4624 OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4625}
4626#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4627
4628#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
4629A_STATUS
4630ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4631 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4632{
Anurag Chouhanc5548422016-02-24 18:33:27 +05304633 qdf_assert(pdev && peer && stats);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304634 qdf_spin_lock_bh(&pdev->peer_stat_mutex);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304635 qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304636 qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004637 return A_OK;
4638}
4639#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4640
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004641static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *pvdev, bool val)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004642{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004643 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004644
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004645 if (NULL == vdev)
4646 return;
4647
4648 vdev->disable_intrabss_fwd = val;
4649}
4650
Nirav Shahc657ef52016-07-26 14:22:38 +05304651/**
4652 * ol_txrx_update_mac_id() - update mac_id for vdev
4653 * @vdev_id: vdev id
4654 * @mac_id: mac id
4655 *
4656 * Return: none
4657 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004658static void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id)
Nirav Shahc657ef52016-07-26 14:22:38 +05304659{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004660 struct ol_txrx_vdev_t *vdev =
4661 (struct ol_txrx_vdev_t *)
4662 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Nirav Shahc657ef52016-07-26 14:22:38 +05304663
4664 if (NULL == vdev) {
4665 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4666 "%s: Invalid vdev_id %d", __func__, vdev_id);
4667 return;
4668 }
4669 vdev->mac_id = mac_id;
4670}
4671
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004672#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
4673
4674/**
4675 * ol_txrx_get_vdev_from_sta_id() - get vdev from sta_id
4676 * @sta_id: sta_id
4677 *
4678 * Return: vdev handle
4679 * NULL if not found.
4680 */
4681static ol_txrx_vdev_handle ol_txrx_get_vdev_from_sta_id(uint8_t sta_id)
4682{
4683 struct ol_txrx_peer_t *peer = NULL;
4684 ol_txrx_pdev_handle pdev = NULL;
4685
4686 if (sta_id >= WLAN_MAX_STA_COUNT) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304687 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304688 "Invalid sta id passed");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004689 return NULL;
4690 }
4691
Anurag Chouhan6d760662016-02-20 16:05:43 +05304692 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004693 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304694 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304695 "PDEV not found for sta_id [%d]", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004696 return NULL;
4697 }
4698
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004699 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004700
4701 if (!peer) {
Zhu Jianminf7ffe942017-08-24 10:24:15 +08004702 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304703 "PEER [%d] not found", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004704 return NULL;
4705 }
4706
4707 return peer->vdev;
4708}
4709
4710/**
4711 * ol_txrx_register_tx_flow_control() - register tx flow control callback
4712 * @vdev_id: vdev_id
4713 * @flowControl: flow control callback
4714 * @osif_fc_ctx: callback context
bings284f8be2017-08-11 10:41:30 +08004715 * @flow_control_is_pause: is vdev paused by flow control
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004716 *
4717 * Return: 0 for success or error code
4718 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004719static int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
bings284f8be2017-08-11 10:41:30 +08004720 ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
4721 ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004722{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004723 struct ol_txrx_vdev_t *vdev =
4724 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004725
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004726 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304727 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304728 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004729 return -EINVAL;
4730 }
4731
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304732 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004733 vdev->osif_flow_control_cb = flowControl;
bings284f8be2017-08-11 10:41:30 +08004734 vdev->osif_flow_control_is_pause = flow_control_is_pause;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004735 vdev->osif_fc_ctx = osif_fc_ctx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304736 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004737 return 0;
4738}
4739
4740/**
Yun Parkeaea8632017-04-09 09:53:45 -07004741 * ol_txrx_deregister_tx_flow_control_cb() - deregister tx flow control
4742 * callback
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004743 * @vdev_id: vdev_id
4744 *
4745 * Return: 0 for success or error code
4746 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004747static int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004748{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004749 struct ol_txrx_vdev_t *vdev =
4750 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004751
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004752 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304753 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304754 "%s: Invalid vdev_id", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004755 return -EINVAL;
4756 }
4757
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304758 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004759 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08004760 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004761 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304762 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004763 return 0;
4764}
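
/*
 * Illustrative usage sketch (hypothetical OS-IF caller, not part of the
 * driver). my_fc_cb, my_fc_is_pause and adapter are placeholders assumed
 * to match the ol_txrx_tx_flow_control_fp and
 * ol_txrx_tx_flow_control_is_pause_fp typedefs declared elsewhere.
 *
 *	ol_txrx_register_tx_flow_control(vdev_id, my_fc_cb, adapter,
 *					 my_fc_is_pause);
 *	...
 *	ol_txrx_deregister_tx_flow_control_cb(vdev_id);
 */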
4765
4766/**
4767 * ol_txrx_get_tx_resource() - check whether free tx descriptors are above low_watermark
4768 * @sta_id: sta id
4769 * @low_watermark: low watermark
4770 * @high_watermark_offset: high watermark offset value
4771 *
4772 * Return: true/false
4773 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004774static bool
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004775ol_txrx_get_tx_resource(uint8_t sta_id,
4776 unsigned int low_watermark,
4777 unsigned int high_watermark_offset)
4778{
4779 ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004780
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004781 if (NULL == vdev) {
Zhu Jianminf7ffe942017-08-24 10:24:15 +08004782 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304783 "%s: Invalid sta_id %d", __func__, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004784 /* Return true so the caller does not treat this as a resource
4785 * shortage below low_watermark.
4786 * sta_id validation will be done in ol_tx_send_data_frame,
4787 * and if the sta_id is not registered then the host will drop
4788 * the packet.
4789 */
4790 return true;
4791 }
4792
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304793 qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304794
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004795 if (vdev->pdev->tx_desc.num_free < (uint16_t) low_watermark) {
4796 vdev->tx_fl_lwm = (uint16_t) low_watermark;
4797 vdev->tx_fl_hwm =
4798 (uint16_t) (low_watermark + high_watermark_offset);
4799 /* Not enough free resource, stop TX OS Q */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05304800 qdf_atomic_set(&vdev->os_q_paused, 1);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304801 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004802 return false;
4803 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304804 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004805 return true;
4806}
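
/*
 * Illustrative usage sketch (hypothetical OS-IF caller, not part of the
 * driver): stop feeding the data path when this returns false. The stored
 * watermarks are presumably consulted elsewhere (e.g. the tx completion
 * path) to decide when to unpause; netif_stop_queue() is a placeholder
 * OS hook.
 *
 *	if (!ol_txrx_get_tx_resource(sta_id, 100, 50))
 *		netif_stop_queue(dev);
 */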
4807
4808/**
4809 * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
4810 * @vdev_id: vdev id
4811 * @pause_q_depth: pause queue depth
4812 *
4813 * Return: 0 for success or error code
4814 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004815static int
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004816ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
4817{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004818 struct ol_txrx_vdev_t *vdev =
4819 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004820
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004821 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304822 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304823 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004824 return -EINVAL;
4825 }
4826
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304827 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004828 vdev->ll_pause.max_q_depth = pause_q_depth;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304829 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004830
4831 return 0;
4832}
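
/*
 * Illustrative usage sketch (hypothetical caller, not part of the driver):
 * cap the per-vdev ll_pause queue at 400 frames.
 *
 *	if (ol_txrx_ll_set_tx_pause_q_depth(vdev_id, 400))
 *		ol_txrx_err("invalid vdev %d", vdev_id);
 */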
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004833#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
4834
Leo Chang8e073612015-11-13 10:55:34 -08004835/**
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004836 * ol_txrx_display_stats() - Display OL TXRX stats
4837 * @value: Module id for which stats need to be displayed
Nirav Shahda008342016-05-17 18:50:40 +05304838 *
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004839 * Return: status
Nirav Shahda008342016-05-17 18:50:40 +05304840 */
Mohit Khannaca4173b2017-09-12 21:52:19 -07004841static QDF_STATUS
4842ol_txrx_display_stats(void *soc, uint16_t value,
4843 enum qdf_stats_verbosity_level verb_level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004844{
4845 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004846 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004847
Anurag Chouhan6d760662016-02-20 16:05:43 +05304848 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004849 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304850 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304851 "%s: pdev is NULL", __func__);
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004852 return QDF_STATUS_E_NULL_VALUE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004853 }
4854
4855 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004856 case CDP_TXRX_PATH_STATS:
Mohit Khannaca4173b2017-09-12 21:52:19 -07004857 ol_txrx_stats_display(pdev, verb_level);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004858 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004859 case CDP_TXRX_TSO_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004860 ol_txrx_stats_display_tso(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004861 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004862 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07004863 ol_tx_dump_flow_pool_info((void *)pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004864 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004865 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304866 qdf_nbuf_tx_desc_count_display();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004867 break;
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -08004868 case CDP_WLAN_RX_BUF_DEBUG_STATS:
4869 htt_display_rx_buf_debug(pdev->htt_pdev);
4870 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304871#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004872 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304873 ol_tx_sched_cur_state_display(pdev);
4874 ol_tx_sched_stats_display(pdev);
4875 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004876 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304877 ol_tx_queue_log_display(pdev);
4878 break;
4879#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004880 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304881 ol_tx_dump_group_credit_stats(pdev);
4882 break;
4883#endif
4884
4885#ifdef DEBUG_HL_LOGGING
Nirav Shaheb017be2018-02-15 11:20:58 +05304886 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304887 htt_dump_bundle_stats(pdev->htt_pdev);
4888 break;
4889#endif
4890#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004891 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004892 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004893 break;
4894 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004895 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004896}
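
/*
 * Illustrative usage sketch (not part of the driver): callers presumably
 * reach this static function through the cdp ops table; a direct call
 * would look like the following, with soc a placeholder soc handle.
 *
 *	ol_txrx_display_stats(soc, CDP_TXRX_PATH_STATS,
 *			      QDF_STATS_VERBOSITY_LEVEL_LOW);
 */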
4897
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004898/**
4899 * ol_txrx_clear_stats() - Clear OL TXRX stats
4900 * @value: Module id for which stats need to be cleared
4901 *
4902 * Return: None
4903 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004904static void ol_txrx_clear_stats(uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004905{
4906 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004907 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004908
Anurag Chouhan6d760662016-02-20 16:05:43 +05304909 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004910 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304911 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304912 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004913 return;
4914 }
4915
4916 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004917 case CDP_TXRX_PATH_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004918 ol_txrx_stats_clear(pdev);
4919 break;
Yun Park1027e8c2017-10-13 15:17:37 -07004920 case CDP_TXRX_TSO_STATS:
4921 ol_txrx_tso_stats_clear(pdev);
4922 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004923 case CDP_DUMP_TX_FLOW_POOL_INFO:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004924 ol_tx_clear_flow_pool_stats();
4925 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004926 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304927 qdf_nbuf_tx_desc_count_clear();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004928 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304929#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004930 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304931 ol_tx_sched_stats_clear(pdev);
4932 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004933 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304934 ol_tx_queue_log_clear(pdev);
4935 break;
4936#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004937 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304938 ol_tx_clear_group_credit_stats(pdev);
4939 break;
4940#endif
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004941 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304942 htt_clear_bundle_stats(pdev->htt_pdev);
4943 break;
4944#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004945 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004946 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004947 break;
4948 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004949
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004950}
4951
4952/**
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004953 * ol_txrx_drop_nbuf_list() - drop an nbuf list
 4954 * @buf_list: buffer list to be dropped
4955 *
4956 * Return: int (number of bufs dropped)
4957 */
4958static inline int ol_txrx_drop_nbuf_list(qdf_nbuf_t buf_list)
4959{
4960 int num_dropped = 0;
4961 qdf_nbuf_t buf, next_buf;
4962 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
4963
4964 buf = buf_list;
4965 while (buf) {
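		/* Tag the nbuf as a peer-cached frame before freeing it */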
Himanshu Agarwaldd2196a2017-07-31 11:38:14 +05304966 QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004967 next_buf = qdf_nbuf_queue_next(buf);
4968 if (pdev)
4969 TXRX_STATS_MSDU_INCR(pdev,
4970 rx.dropped_peer_invalid, buf);
4971 qdf_nbuf_free(buf);
4972 buf = next_buf;
4973 num_dropped++;
4974 }
4975 return num_dropped;
4976}
4977
4978/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004979 * ol_rx_data_cb() - data rx callback
 4980 * @pdev: data path pdev handle
4981 * @buf_list: buffer list
Nirav Shah36a87bf2016-02-22 12:38:46 +05304982 * @staid: Station id
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004983 *
4984 * Return: None
4985 */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304986static void ol_rx_data_cb(struct ol_txrx_pdev_t *pdev,
4987 qdf_nbuf_t buf_list, uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004988{
Mohit Khanna0696eef2016-04-14 16:14:08 -07004989 void *osif_dev;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004990 uint8_t drop_count = 0;
Nirav Shahcbc6d722016-03-01 16:24:53 +05304991 qdf_nbuf_t buf, next_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304992 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004993 ol_txrx_rx_fp data_rx = NULL;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304994 struct ol_txrx_peer_t *peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004995
Jeff Johnsondac9e382017-09-24 10:36:08 -07004996 if (qdf_unlikely(!pdev))
Nirav Shah36a87bf2016-02-22 12:38:46 +05304997 goto free_buf;
4998
4999 /* Do not use peer directly. Derive peer from staid to
5000 * make sure that peer is valid.
5001 */
Jingxiang Ge3badb982018-01-02 17:39:01 +08005002 peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev,
5003 staid, PEER_DEBUG_ID_OL_RX_THREAD);
Nirav Shah36a87bf2016-02-22 12:38:46 +05305004 if (!peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005005 goto free_buf;
5006
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305007 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre50141c52016-04-07 13:15:29 -07005008 if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
5009 !peer->vdev->rx)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305010 qdf_spin_unlock_bh(&peer->peer_info_lock);
Jingxiang Ge9f297062018-01-24 13:31:31 +08005011 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005012 goto free_buf;
5013 }
Dhanashri Atre182b0272016-02-17 15:35:07 -08005014
5015 data_rx = peer->vdev->rx;
Mohit Khanna0696eef2016-04-14 16:14:08 -07005016 osif_dev = peer->vdev->osif_dev;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305017 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005018
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005019 qdf_spin_lock_bh(&peer->bufq_info.bufq_lock);
5020 if (!list_empty(&peer->bufq_info.cached_bufq)) {
5021 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005022 /* Flush the cached frames to HDD before passing new rx frame */
5023 ol_txrx_flush_rx_frames(peer, 0);
5024 } else
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005025 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005026
Jingxiang Ge3badb982018-01-02 17:39:01 +08005027 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
5028
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005029 buf = buf_list;
5030 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05305031 next_buf = qdf_nbuf_queue_next(buf);
5032 qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
Mohit Khanna0696eef2016-04-14 16:14:08 -07005033 ret = data_rx(osif_dev, buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305034 if (ret != QDF_STATUS_SUCCESS) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305035 ol_txrx_err("Frame Rx to HDD failed");
Nirav Shah6a4eee62016-04-25 10:15:04 +05305036 if (pdev)
5037 TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05305038 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005039 }
5040 buf = next_buf;
5041 }
5042 return;
5043
5044free_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005045 drop_count = ol_txrx_drop_nbuf_list(buf_list);
5046 ol_txrx_warn("%s:Dropped frames %u", __func__, drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005047}
5048
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005049/* print for every 16th packet */
5050#define OL_TXRX_PRINT_RATE_LIMIT_THRESH 0x0f
5051struct ol_rx_cached_buf *cache_buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305052
5053/** helper function to drop packets
 5054 * Note: the caller must hold the cached bufq lock before invoking
 5055 * this function. It also assumes that the pointers passed in
 5056 * are valid (non-NULL).
5057 */
5058static inline void ol_txrx_drop_frames(
5059 struct ol_txrx_cached_bufq_t *bufqi,
5060 qdf_nbuf_t rx_buf_list)
5061{
5062 uint32_t dropped = ol_txrx_drop_nbuf_list(rx_buf_list);
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07005063
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305064 bufqi->dropped += dropped;
5065 bufqi->qdepth_no_thresh += dropped;
5066
5067 if (bufqi->qdepth_no_thresh > bufqi->high_water_mark)
5068 bufqi->high_water_mark = bufqi->qdepth_no_thresh;
5069}
5070
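/**
 * ol_txrx_enqueue_rx_frames() - cache rx frames for a peer that is not yet
 *	registered for data service
 * @peer: peer whose cached bufq the frames are queued on
 * @bufqi: cached buffer queue info of the peer
 * @rx_buf_list: chained list of rx frames to cache
 *
 * Return: QDF_STATUS_SUCCESS if the frames were cached,
 *	QDF_STATUS_E_FAULT if the queue threshold was reached or the peer
 *	became invalid and the frames were dropped
 */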
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005071static QDF_STATUS ol_txrx_enqueue_rx_frames(
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305072 struct ol_txrx_peer_t *peer,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005073 struct ol_txrx_cached_bufq_t *bufqi,
5074 qdf_nbuf_t rx_buf_list)
5075{
5076 struct ol_rx_cached_buf *cache_buf;
5077 qdf_nbuf_t buf, next_buf;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005078 static uint32_t count;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005079
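	/*
	 * Rate-limit the log below: with a mask of 0x0f it fires only on
	 * every 16th invocation rather than for each cached frame.
	 */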
5080 if ((count++ & OL_TXRX_PRINT_RATE_LIMIT_THRESH) == 0)
5081 ol_txrx_info_high(
5082 "Data on the peer before it is registered bufq->curr %d bufq->drops %d",
5083 bufqi->curr, bufqi->dropped);
5084
5085 qdf_spin_lock_bh(&bufqi->bufq_lock);
5086 if (bufqi->curr >= bufqi->thresh) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305087 ol_txrx_drop_frames(bufqi, rx_buf_list);
5088 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5089 return QDF_STATUS_E_FAULT;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005090 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005091 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5092
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005093 buf = rx_buf_list;
5094 while (buf) {
5095 next_buf = qdf_nbuf_queue_next(buf);
5096 cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
5097 if (!cache_buf) {
5098 ol_txrx_err(
5099 "Failed to allocate buf to cache the rx frames");
5100 qdf_nbuf_free(buf);
5101 } else {
5102 /* Add NULL terminator */
5103 qdf_nbuf_set_next(buf, NULL);
5104 cache_buf->buf = buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305105 if (peer && peer->valid) {
5106 qdf_spin_lock_bh(&bufqi->bufq_lock);
5107 list_add_tail(&cache_buf->list,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005108 &bufqi->cached_bufq);
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305109 bufqi->curr++;
5110 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5111 } else {
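			/*
			 * Peer is gone or no longer valid: re-chain the
			 * remaining frames and drop them all.
			 */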
5112 qdf_mem_free(cache_buf);
5113 rx_buf_list = buf;
5114 qdf_nbuf_set_next(rx_buf_list, next_buf);
5115 qdf_spin_lock_bh(&bufqi->bufq_lock);
5116 ol_txrx_drop_frames(bufqi, rx_buf_list);
5117 qdf_spin_unlock_bh(&bufqi->bufq_lock);
5118 return QDF_STATUS_E_FAULT;
5119 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005120 }
5121 buf = next_buf;
5122 }
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305123 return QDF_STATUS_SUCCESS;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005124}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005125/**
5126 * ol_rx_data_process() - process rx frame
5127 * @peer: peer
5128 * @rx_buf_list: rx buffer list
5129 *
5130 * Return: None
5131 */
5132void ol_rx_data_process(struct ol_txrx_peer_t *peer,
Nirav Shahcbc6d722016-03-01 16:24:53 +05305133 qdf_nbuf_t rx_buf_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005134{
Yun Parkeaea8632017-04-09 09:53:45 -07005135 /*
 5136 * The firmware data path active response uses the shim RX thread.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005137 * The T2H message runs in SIRQ context, and the
Yun Parkeaea8632017-04-09 09:53:45 -07005138 * IPA kernel module API must not be called from SIRQ context.
5139 */
Dhanashri Atre182b0272016-02-17 15:35:07 -08005140 ol_txrx_rx_fp data_rx = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05305141 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005142 uint8_t drop_count;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005143
5144 if ((!peer) || (!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305145 ol_txrx_err("peer/pdev is NULL");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005146 goto drop_rx_buf;
5147 }
5148
Dhanashri Atre182b0272016-02-17 15:35:07 -08005149 qdf_assert(peer->vdev);
5150
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305151 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005152 if (peer->state >= OL_TXRX_PEER_STATE_CONN)
Dhanashri Atre182b0272016-02-17 15:35:07 -08005153 data_rx = peer->vdev->rx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305154 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005155
5156 /*
 5157 * If there are data frames from the peer before the peer is
 5158 * registered for data service, enqueue them onto the pending queue,
 5159 * which will be flushed to HDD once that station is registered.
5160 */
5161 if (!data_rx) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05305162 if (ol_txrx_enqueue_rx_frames(peer, &peer->bufq_info,
5163 rx_buf_list)
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005164 != QDF_STATUS_SUCCESS)
Poddar, Siddarth07eebf32017-04-19 12:40:26 +05305165 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5166 "%s: failed to enqueue rx frm to cached_bufq",
5167 __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005168 } else {
5169#ifdef QCA_CONFIG_SMP
5170 /*
 5171 * If the kernel is SMP, schedule the rx thread to
 5172 * make better use of multiple cores.
5173 */
5174 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
Nirav Shah36a87bf2016-02-22 12:38:46 +05305175 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005176 } else {
5177 p_cds_sched_context sched_ctx =
5178 get_cds_sched_ctxt();
5179 struct cds_ol_rx_pkt *pkt;
5180
5181 if (unlikely(!sched_ctx))
5182 goto drop_rx_buf;
5183
5184 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
5185 if (!pkt) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05305186 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05305187 "No available Rx message buffer");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005188 goto drop_rx_buf;
5189 }
5190 pkt->callback = (cds_ol_rx_thread_cb)
5191 ol_rx_data_cb;
Nirav Shah36a87bf2016-02-22 12:38:46 +05305192 pkt->context = (void *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005193 pkt->Rxpkt = (void *)rx_buf_list;
5194 pkt->staId = peer->local_id;
5195 cds_indicate_rxpkt(sched_ctx, pkt);
5196 }
5197#else /* QCA_CONFIG_SMP */
Nirav Shah36a87bf2016-02-22 12:38:46 +05305198 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005199#endif /* QCA_CONFIG_SMP */
5200 }
5201
5202 return;
5203
5204drop_rx_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07005205 drop_count = ol_txrx_drop_nbuf_list(rx_buf_list);
5206 ol_txrx_info_high("Dropped rx packets %u", drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005207}
5208
5209/**
5210 * ol_txrx_register_peer() - register peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005211 * @sta_desc: sta descriptor
5212 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05305213 * Return: QDF Status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005214 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005215static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005216{
5217 struct ol_txrx_peer_t *peer;
Anurag Chouhan6d760662016-02-20 16:05:43 +05305218 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005219 union ol_txrx_peer_update_param_t param;
5220 struct privacy_exemption privacy_filter;
5221
5222 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305223 ol_txrx_err("Pdev is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305224 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005225 }
5226
5227 if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305228 ol_txrx_err("Invalid sta id :%d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005229 sta_desc->sta_id);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305230 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005231 }
5232
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005233 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
5234 sta_desc->sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005235 if (!peer)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305236 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005237
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305238 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005239 peer->state = OL_TXRX_PEER_STATE_CONN;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05305240 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005241
5242 param.qos_capable = sta_desc->is_qos_enabled;
5243 ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
5244 ol_txrx_peer_update_qos_capable);
5245
5246 if (sta_desc->is_wapi_supported) {
 5247 /* Privacy filter to accept unencrypted WAI frames */
5248 privacy_filter.ether_type = ETHERTYPE_WAI;
5249 privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
5250 privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
5251 ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
5252 }
5253
5254 ol_txrx_flush_rx_frames(peer, 0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305255 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005256}
5257
5258/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005259 * ol_txrx_register_ocb_peer - Function to register the OCB peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005260 * @mac_addr: MAC address of the self peer
5261 * @peer_id: Pointer to the peer ID
5262 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305263 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005264 */
Jeff Johnson382bce02017-09-01 14:21:07 -07005265static QDF_STATUS ol_txrx_register_ocb_peer(uint8_t *mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005266 uint8_t *peer_id)
5267{
5268 ol_txrx_pdev_handle pdev;
5269 ol_txrx_peer_handle peer;
5270
Anurag Chouhan6d760662016-02-20 16:05:43 +05305271 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005272 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305273 ol_txrx_err("%s: Unable to find pdev!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005274 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305275 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005276 }
5277
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005278 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
5279 mac_addr, peer_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005280 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305281 ol_txrx_err("%s: Unable to find OCB peer!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005282 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305283 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005284 }
5285
5286 ol_txrx_set_ocb_peer(pdev, peer);
5287
5288 /* Set peer state to connected */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005289 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005290 OL_TXRX_PEER_STATE_AUTH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005291
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305292 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005293}
5294
5295/**
5296 * ol_txrx_set_ocb_peer - Function to store the OCB peer
5297 * @pdev: Handle to the HTT instance
5298 * @peer: Pointer to the peer
5299 */
5300void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
5301 struct ol_txrx_peer_t *peer)
5302{
5303 if (pdev == NULL)
5304 return;
5305
5306 pdev->ocb_peer = peer;
5307 pdev->ocb_peer_valid = (NULL != peer);
5308}
5309
5310/**
5311 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
5312 * @pdev: Handle to the HTT instance
5313 * @peer: Pointer to the returned peer
5314 *
5315 * Return: true if the peer is valid, false if not
5316 */
5317bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
5318 struct ol_txrx_peer_t **peer)
5319{
5320 int rc;
5321
5322 if ((pdev == NULL) || (peer == NULL)) {
5323 rc = false;
5324 goto exit;
5325 }
5326
5327 if (pdev->ocb_peer_valid) {
5328 *peer = pdev->ocb_peer;
5329 rc = true;
5330 } else {
5331 rc = false;
5332 }
5333
5334exit:
5335 return rc;
5336}
5337
5338#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5339/**
5340 * ol_txrx_register_pause_cb() - register pause callback
5341 * @pause_cb: pause callback
5342 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05305343 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005344 */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07005345static QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
5346 tx_pause_callback pause_cb)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005347{
Anurag Chouhan6d760662016-02-20 16:05:43 +05305348 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Yun Parkeaea8632017-04-09 09:53:45 -07005349
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005350 if (!pdev || !pause_cb) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305351 ol_txrx_err("pdev or pause_cb is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305352 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005353 }
5354 pdev->pause_cb = pause_cb;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305355 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005356}
5357#endif
5358
Poddar, Siddarth34872782017-08-10 14:08:51 +05305359/**
5360 * ol_register_data_stall_detect_cb() - register data stall callback
5361 * @data_stall_detect_callback: data stall callback function
5362 *
5363 *
5364 * Return: QDF_STATUS Enumeration
5365 */
5366static QDF_STATUS ol_register_data_stall_detect_cb(
5367 data_stall_detect_cb data_stall_detect_callback)
5368{
5369 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5370
5371 if (pdev == NULL) {
5372 ol_txrx_err("%s: pdev NULL!", __func__);
5373 return QDF_STATUS_E_INVAL;
5374 }
5375 pdev->data_stall_detect_callback = data_stall_detect_callback;
5376 return QDF_STATUS_SUCCESS;
5377}
5378
5379/**
5380 * ol_deregister_data_stall_detect_cb() - de-register data stall callback
5381 * @data_stall_detect_callback: data stall callback function
5382 *
5383 *
5384 * Return: QDF_STATUS Enumeration
5385 */
5386static QDF_STATUS ol_deregister_data_stall_detect_cb(
5387 data_stall_detect_cb data_stall_detect_callback)
5388{
5389 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5390
5391 if (pdev == NULL) {
5392 ol_txrx_err("%s: pdev NULL!", __func__);
5393 return QDF_STATUS_E_INVAL;
5394 }
5395 pdev->data_stall_detect_callback = NULL;
5396 return QDF_STATUS_SUCCESS;
5397}
5398
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305399/**
5400 * ol_txrx_post_data_stall_event() - post data stall event
5401 * @indicator: Module triggering data stall
5402 * @data_stall_type: data stall event type
5403 * @pdev_id: pdev id
5404 * @vdev_id_bitmap: vdev id bitmap
5405 * @recovery_type: data stall recovery type
5406 *
5407 * Return: None
5408 */
5409static void ol_txrx_post_data_stall_event(
5410 enum data_stall_log_event_indicator indicator,
5411 enum data_stall_log_event_type data_stall_type,
5412 uint32_t pdev_id, uint32_t vdev_id_bitmap,
5413 enum data_stall_log_recovery_type recovery_type)
5414{
5415 struct scheduler_msg msg = {0};
5416 QDF_STATUS status;
5417 struct data_stall_event_info *data_stall_info;
5418 ol_txrx_pdev_handle pdev;
5419
5420 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5421 if (!pdev) {
5422 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5423 "%s: pdev is NULL.", __func__);
5424 return;
5425 }
5426 data_stall_info = qdf_mem_malloc(sizeof(*data_stall_info));
5427 if (!data_stall_info) {
5428 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5429 "%s: data_stall_info is NULL.", __func__);
5430 return;
5431 }
5432 data_stall_info->indicator = indicator;
5433 data_stall_info->data_stall_type = data_stall_type;
5434 data_stall_info->vdev_id_bitmap = vdev_id_bitmap;
5435 data_stall_info->pdev_id = pdev_id;
5436 data_stall_info->recovery_type = recovery_type;
5437
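	/* For FW rx refill failures, also dump the HTT rx ring state */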
Poddar, Siddarthb9047592017-10-05 15:48:28 +05305438 if (data_stall_info->data_stall_type ==
5439 DATA_STALL_LOG_FW_RX_REFILL_FAILED)
5440 htt_log_rx_ring_info(pdev->htt_pdev);
5441
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305442 sys_build_message_header(SYS_MSG_ID_DATA_STALL_MSG, &msg);
5443 /* Save callback and data */
5444 msg.callback = pdev->data_stall_detect_callback;
5445 msg.bodyptr = data_stall_info;
5446 msg.bodyval = 0;
5447
5448 status = scheduler_post_msg(QDF_MODULE_ID_SYS, &msg);
5449
5450 if (status != QDF_STATUS_SUCCESS) {
5451 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5452 "%s: failed to post data stall msg to SYS", __func__);
5453 qdf_mem_free(data_stall_info);
5454 }
5455}
5456
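/**
 * ol_txrx_dump_pkt() - print the contents of a network buffer for debugging
 * @nbuf: network buffer to dump
 * @nbuf_paddr: physical address of the buffer
 * @len: number of bytes to dump
 *
 * Return: none
 */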
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305457void
5458ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
5459{
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07005460 qdf_print("%s: Pkt: VA 0x%pK PA 0x%llx len %d\n", __func__,
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305461 qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
5462 print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
5463 qdf_nbuf_data(nbuf), len, true);
5464}
5465
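/**
 * ol_txrx_fwd_desc_thresh_check() - check whether enough tx descriptors
 *	remain to allow intra-BSS forwarding of an rx frame
 * @vdev: vdev on which the frame would be forwarded
 *
 * With flow control V2 this returns true only when the vdev's flow pool has
 * more descriptors available than the stop threshold plus the non-forwarding
 * reserve; without flow control V2 it always returns true.
 *
 * Return: true if forwarding may consume a descriptor, false otherwise
 */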
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305466#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5467bool
5468ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
5469{
Yun Park63661012018-01-04 15:04:22 -08005470 struct ol_tx_flow_pool_t *pool;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305471 bool enough_desc_flag;
5472
5473 if (!vdev)
Yun Parkff5da562017-01-18 14:44:20 -08005474 return false;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305475
5476 pool = vdev->pool;
5477
Yun Parkff5da562017-01-18 14:44:20 -08005478 if (!pool)
5479 return false;
5480
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305481 qdf_spin_lock_bh(&pool->flow_pool_lock);
5482 enough_desc_flag = (pool->avail_desc < (pool->stop_th +
Yun Parkff5da562017-01-18 14:44:20 -08005483 OL_TX_NON_FWD_RESERVE))
5484 ? false : true;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305485 qdf_spin_unlock_bh(&pool->flow_pool_lock);
5486 return enough_desc_flag;
5487}
5488#else
5489bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
5490{
5491 return true;
5492}
5493#endif
5494
Dhanashri Atre12a08392016-02-17 13:10:34 -08005495/**
5496 * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
5497 * @vdev_id: vdev_id
5498 *
5499 * Return: vdev handle
5500 * NULL if not found.
5501 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005502struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
Dhanashri Atre12a08392016-02-17 13:10:34 -08005503{
5504 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5505 ol_txrx_vdev_handle vdev = NULL;
5506
5507 if (qdf_unlikely(!pdev))
5508 return NULL;
5509
5510 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5511 if (vdev->vdev_id == vdev_id)
5512 break;
5513 }
5514
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005515 return (struct cdp_vdev *)vdev;
Dhanashri Atre12a08392016-02-17 13:10:34 -08005516}
Nirav Shah2e583a02016-04-30 14:06:12 +05305517
5518/**
5519 * ol_txrx_set_wisa_mode() - set wisa mode
5520 * @vdev: vdev handle
5521 * @enable: enable flag
5522 *
5523 * Return: QDF STATUS
5524 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005525static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_vdev *pvdev, bool enable)
Nirav Shah2e583a02016-04-30 14:06:12 +05305526{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005527 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Leo Chang98726762016-10-28 11:07:18 -07005528
Nirav Shah2e583a02016-04-30 14:06:12 +05305529 if (!vdev)
5530 return QDF_STATUS_E_INVAL;
5531
5532 vdev->is_wisa_mode_enable = enable;
5533 return QDF_STATUS_SUCCESS;
5534}
Leo Chang98726762016-10-28 11:07:18 -07005535
5536/**
5537 * ol_txrx_get_vdev_id() - get interface id from interface context
5538 * @pvdev: vdev handle
5539 *
5540 * Return: virtual interface id
5541 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005542static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
Leo Chang98726762016-10-28 11:07:18 -07005543{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005544 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07005545
Leo Chang98726762016-10-28 11:07:18 -07005546 return vdev->vdev_id;
5547}
5548
5549/**
5550 * ol_txrx_last_assoc_received() - get time of last assoc received
5551 * @ppeer: peer handle
5552 *
 5553 * Return: pointer to the time of last assoc received
5554 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005555static qdf_time_t *ol_txrx_last_assoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005556{
5557 ol_txrx_peer_handle peer = ppeer;
5558
5559 return &peer->last_assoc_rcvd;
5560}
5561
5562/**
5563 * ol_txrx_last_disassoc_received() - get time of last disassoc received
5564 * @ppeer: peer handle
5565 *
 5566 * Return: pointer to the time of last disassoc received
5567 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005568static qdf_time_t *ol_txrx_last_disassoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005569{
5570 ol_txrx_peer_handle peer = ppeer;
5571
5572 return &peer->last_disassoc_rcvd;
5573}
5574
5575/**
5576 * ol_txrx_last_deauth_received() - get time of last deauth received
5577 * @ppeer: peer handle
5578 *
 5579 * Return: pointer to the time of last deauth received
5580 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005581static qdf_time_t *ol_txrx_last_deauth_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005582{
5583 ol_txrx_peer_handle peer = ppeer;
5584
5585 return &peer->last_deauth_rcvd;
5586}
5587
5588/**
5589 * ol_txrx_soc_attach_target() - attach soc target
5590 * @soc: soc handle
5591 *
 5592 * MCL legacy OL does nothing here
5593 *
5594 * Return: 0
5595 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005596static int ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
Leo Chang98726762016-10-28 11:07:18 -07005597{
 5598 /* MCL legacy OL does nothing here */
5599 return 0;
5600}
5601
5602/**
5603 * ol_txrx_soc_detach() - detach soc target
5604 * @soc: soc handle
5605 *
 5606 * For MCL legacy OL this simply frees the SOC handle
 5607 *
 5608 * Return: none
5609 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005610static void ol_txrx_soc_detach(void *soc)
Leo Chang98726762016-10-28 11:07:18 -07005611{
Venkata Sharath Chandra Manchala0c2eece2017-03-09 17:30:52 -08005612 qdf_mem_free(soc);
Leo Chang98726762016-10-28 11:07:18 -07005613}
5614
5615/**
5616 * ol_txrx_pkt_log_con_service() - connect packet log service
5617 * @ppdev: physical device handle
5618 * @scn: device context
5619 *
 5620 * Return: none
5621 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005622static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
Leo Chang98726762016-10-28 11:07:18 -07005623{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005624 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07005625
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005626 htt_pkt_log_init((struct cdp_pdev *)pdev, scn);
Leo Chang98726762016-10-28 11:07:18 -07005627 pktlog_htc_attach();
5628}
5629
5630/* OL wrapper functions for CDP abstraction */
5631/**
5632 * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
5633 * @peer: peer handle
5634 * @drop: rx packets drop or deliver
5635 *
5636 * Return: none
5637 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005638static void ol_txrx_wrapper_flush_rx_frames(void *peer, bool drop)
Leo Chang98726762016-10-28 11:07:18 -07005639{
5640 ol_txrx_flush_rx_frames((ol_txrx_peer_handle)peer, drop);
5641}
5642
5643/**
5644 * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
5645 * @ppdev: pdev handle
5646 * @vdev_id: interface id
5647 *
5648 * Return: virtual interface instance
5649 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005650static
5651struct cdp_vdev *ol_txrx_wrapper_get_vdev_from_vdev_id(struct cdp_pdev *ppdev,
5652 uint8_t vdev_id)
Leo Chang98726762016-10-28 11:07:18 -07005653{
5654 return ol_txrx_get_vdev_from_vdev_id(vdev_id);
5655}
5656
5657/**
5658 * ol_txrx_wrapper_register_peer() - register peer
5659 * @pdev: pdev handle
5660 * @sta_desc: peer description
5661 *
5662 * Return: QDF STATUS
5663 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005664static QDF_STATUS ol_txrx_wrapper_register_peer(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005665 struct ol_txrx_desc_type *sta_desc)
5666{
5667 return ol_txrx_register_peer(sta_desc);
5668}
5669
5670/**
5671 * ol_txrx_wrapper_peer_find_by_local_id() - Find a txrx peer handle
5672 * @pdev - the data physical device object
5673 * @local_peer_id - the ID txrx assigned locally to the peer in question
5674 *
5675 * The control SW typically uses the txrx peer handle to refer to the peer.
 5676 * In unusual circumstances, if it is infeasible for the control SW to maintain
 5677 * the txrx peer handle but it can maintain a small integer local peer ID,
 5678 * this function allows the peer handle to be retrieved, based on the local
5679 * peer ID.
5680 *
5681 * @return handle to the txrx peer object
5682 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005683static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005684ol_txrx_wrapper_peer_find_by_local_id(struct cdp_pdev *pdev,
5685 uint8_t local_peer_id)
Leo Chang98726762016-10-28 11:07:18 -07005686{
5687 return (void *)ol_txrx_peer_find_by_local_id(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005688 pdev, local_peer_id);
Leo Chang98726762016-10-28 11:07:18 -07005689}
5690
5691/**
 5692 * ol_txrx_wrapper_cfg_is_high_latency() - check whether the device is high or low latency
5693 * @pdev: pdev handle
5694 *
5695 * Return: 1 high latency bus
5696 * 0 low latency bus
5697 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005698static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
Leo Chang98726762016-10-28 11:07:18 -07005699{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005700 return ol_cfg_is_high_latency(cfg_pdev);
Leo Chang98726762016-10-28 11:07:18 -07005701}
5702
5703/**
5704 * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
5705 * @data_peer - which peer has changed its state
5706 * @state - the new state of the peer
5707 *
5708 * Specify the peer's authentication state (none, connected, authenticated)
5709 * to allow the data SW to determine whether to filter out invalid data frames.
5710 * (In the "connected" state, where security is enabled, but authentication
5711 * has not completed, tx and rx data frames other than EAPOL or WAPI should
5712 * be discarded.)
5713 * This function is only relevant for systems in which the tx and rx filtering
5714 * are done in the host rather than in the target.
5715 *
5716 * Return: QDF Status
5717 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005718static QDF_STATUS ol_txrx_wrapper_peer_state_update(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005719 uint8_t *peer_mac, enum ol_txrx_peer_state state)
5720{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005721 return ol_txrx_peer_state_update(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005722 peer_mac, state);
5723}
5724
5725/**
5726 * ol_txrx_wrapper_find_peer_by_addr() - find peer instance by address
5727 * @pdev: pdev handle
 5728 * @peer_addr: peer address we want to find
5729 * @peer_id: peer id
5730 *
5731 * Return: peer instance pointer
5732 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005733static void *ol_txrx_wrapper_find_peer_by_addr(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005734 uint8_t *peer_addr, uint8_t *peer_id)
5735{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005736 return ol_txrx_find_peer_by_addr(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005737 peer_addr, peer_id);
5738}
5739
5740/**
Mohit Khannab7bec722017-11-10 11:43:44 -08005741 * ol_txrx_wrapper_peer_get_ref_by_addr() - get peer reference by address
5742 * @pdev: pdev handle
5743 * @peer_addr: peer address we want to find
5744 * @peer_id: peer id
5745 * @debug_id: peer debug id for tracking
5746 *
5747 * Return: peer instance pointer
5748 */
5749static void *
5750ol_txrx_wrapper_peer_get_ref_by_addr(struct cdp_pdev *pdev,
5751 u8 *peer_addr, uint8_t *peer_id,
5752 enum peer_debug_id_type debug_id)
5753{
5754 return ol_txrx_peer_get_ref_by_addr((ol_txrx_pdev_handle)pdev,
5755 peer_addr, peer_id, debug_id);
5756}
5757
5758/**
5759 * ol_txrx_wrapper_peer_release_ref() - release peer reference
5760 * @peer: peer handle
5761 * @debug_id: peer debug id for tracking
5762 *
5763 * Release peer ref acquired by peer get ref api
5764 *
5765 * Return: void
5766 */
5767static void ol_txrx_wrapper_peer_release_ref(void *peer,
5768 enum peer_debug_id_type debug_id)
5769{
5770 ol_txrx_peer_release_ref(peer, debug_id);
5771}
5772
5773/**
Leo Chang98726762016-10-28 11:07:18 -07005774 * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
5775 * @cfg_ctx: cfg context
5776 * @cfg_param: cfg parameters
5777 *
5778 * Return: none
5779 */
Jeff Johnsonffa9afc2016-12-19 15:34:41 -08005780static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005781ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
5782 void *cfg_param)
Leo Chang98726762016-10-28 11:07:18 -07005783{
5784 return ol_tx_set_flow_control_parameters(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005785 cfg_pdev,
Leo Chang98726762016-10-28 11:07:18 -07005786 (struct txrx_pdev_cfg_param_t *)cfg_param);
5787}
5788
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08005789#ifdef WDI_EVENT_ENABLE
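/**
 * ol_get_pldev() - get the pktlog device handle from a txrx pdev
 * @txrx_pdev: data path pdev handle
 *
 * Return: pktlog device pointer, or NULL if the pdev is NULL
 */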
5790void *ol_get_pldev(struct cdp_pdev *txrx_pdev)
5791{
5792 struct ol_txrx_pdev_t *pdev =
5793 (struct ol_txrx_pdev_t *)txrx_pdev;
5794 if (pdev != NULL)
5795 return pdev->pl_dev;
5796
5797 return NULL;
5798}
5799#endif
5800
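/*
 * CDP ops tables: these map the converged data path (CDP) interface onto
 * the legacy OL TXRX implementations above. ol_txrx_soc_attach() returns a
 * SOC handle whose ops pointer refers to ol_txrx_ops, which aggregates all
 * of the tables below.
 */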
Leo Chang98726762016-10-28 11:07:18 -07005801static struct cdp_cmn_ops ol_ops_cmn = {
5802 .txrx_soc_attach_target = ol_txrx_soc_attach_target,
5803 .txrx_vdev_attach = ol_txrx_vdev_attach,
5804 .txrx_vdev_detach = ol_txrx_vdev_detach,
5805 .txrx_pdev_attach = ol_txrx_pdev_attach,
5806 .txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
5807 .txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05305808 .txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
Leo Chang98726762016-10-28 11:07:18 -07005809 .txrx_pdev_detach = ol_txrx_pdev_detach,
Dhanashri Atre272fd232016-11-10 16:20:46 -08005810 .txrx_peer_create = ol_txrx_peer_attach,
5811 .txrx_peer_setup = NULL,
5812 .txrx_peer_teardown = NULL,
5813 .txrx_peer_delete = ol_txrx_peer_detach,
Leo Chang98726762016-10-28 11:07:18 -07005814 .txrx_vdev_register = ol_txrx_vdev_register,
5815 .txrx_soc_detach = ol_txrx_soc_detach,
5816 .txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
5817 .txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
5818 .txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
Krishna Kumaar Natarajan5fb9ac12016-12-06 14:28:35 -08005819 .txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
Leo Chang98726762016-10-28 11:07:18 -07005820 .txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
5821 .txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
5822 .txrx_get_tx_pending = ol_txrx_get_tx_pending,
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07005823 .flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005824 .txrx_fw_stats_get = ol_txrx_fw_stats_get,
5825 .display_stats = ol_txrx_display_stats,
Leo Chang98726762016-10-28 11:07:18 -07005826 /* TODO: Add other functions */
5827};
5828
5829static struct cdp_misc_ops ol_ops_misc = {
5830 .set_ibss_vdev_heart_beat_timer =
5831 ol_txrx_set_ibss_vdev_heart_beat_timer,
5832#ifdef CONFIG_HL_SUPPORT
5833 .set_wmm_param = ol_txrx_set_wmm_param,
5834#endif /* CONFIG_HL_SUPPORT */
5835 .bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
5836 .bad_peer_txctl_update_threshold =
5837 ol_txrx_bad_peer_txctl_update_threshold,
5838 .hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
5839 .tx_non_std = ol_tx_non_std,
5840 .get_vdev_id = ol_txrx_get_vdev_id,
5841 .set_wisa_mode = ol_txrx_set_wisa_mode,
Poddar, Siddarth34872782017-08-10 14:08:51 +05305842 .txrx_data_stall_cb_register = ol_register_data_stall_detect_cb,
5843 .txrx_data_stall_cb_deregister = ol_deregister_data_stall_detect_cb,
Poddar, Siddarthdb568162017-07-27 18:16:38 +05305844 .txrx_post_data_stall_event = ol_txrx_post_data_stall_event,
Leo Chang98726762016-10-28 11:07:18 -07005845#ifdef FEATURE_RUNTIME_PM
5846 .runtime_suspend = ol_txrx_runtime_suspend,
5847 .runtime_resume = ol_txrx_runtime_resume,
5848#endif /* FEATURE_RUNTIME_PM */
5849 .get_opmode = ol_txrx_get_opmode,
5850 .mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
5851 .update_mac_id = ol_txrx_update_mac_id,
5852 .flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
5853 .get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
5854 .pkt_log_init = htt_pkt_log_init,
5855 .pkt_log_con_service = ol_txrx_pkt_log_con_service
5856};
5857
5858static struct cdp_flowctl_ops ol_ops_flowctl = {
5859#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5860 .register_pause_cb = ol_txrx_register_pause_cb,
5861 .set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07005862 .dump_flow_pool_info = ol_tx_dump_flow_pool_info,
Leo Chang98726762016-10-28 11:07:18 -07005863#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
5864};
5865
5866static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
5867#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
5868 .register_tx_flow_control = ol_txrx_register_tx_flow_control,
5869 .deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
5870 .flow_control_cb = ol_txrx_flow_control_cb,
5871 .get_tx_resource = ol_txrx_get_tx_resource,
5872 .ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
5873 .vdev_flush = ol_txrx_vdev_flush,
5874 .vdev_pause = ol_txrx_vdev_pause,
5875 .vdev_unpause = ol_txrx_vdev_unpause
5876#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
5877};
5878
Leo Chang98726762016-10-28 11:07:18 -07005879#ifdef IPA_OFFLOAD
Yun Parkb4f591d2017-03-29 15:51:01 -07005880static struct cdp_ipa_ops ol_ops_ipa = {
Leo Chang98726762016-10-28 11:07:18 -07005881 .ipa_get_resource = ol_txrx_ipa_uc_get_resource,
5882 .ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
5883 .ipa_set_active = ol_txrx_ipa_uc_set_active,
5884 .ipa_op_response = ol_txrx_ipa_uc_op_response,
5885 .ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
5886 .ipa_get_stat = ol_txrx_ipa_uc_get_stat,
5887 .ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
Yun Park637d6482016-10-05 10:51:33 -07005888 .ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base,
Yun Parkb4f591d2017-03-29 15:51:01 -07005889 .ipa_enable_autonomy = ol_txrx_ipa_enable_autonomy,
5890 .ipa_disable_autonomy = ol_txrx_ipa_disable_autonomy,
5891 .ipa_setup = ol_txrx_ipa_setup,
5892 .ipa_cleanup = ol_txrx_ipa_cleanup,
5893 .ipa_setup_iface = ol_txrx_ipa_setup_iface,
5894 .ipa_cleanup_iface = ol_txrx_ipa_cleanup_iface,
5895 .ipa_enable_pipes = ol_txrx_ipa_enable_pipes,
5896 .ipa_disable_pipes = ol_txrx_ipa_disable_pipes,
5897 .ipa_set_perf_level = ol_txrx_ipa_set_perf_level,
5898#ifdef FEATURE_METERING
Yun Park637d6482016-10-05 10:51:33 -07005899 .ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
5900 .ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota
Yun Parkb4f591d2017-03-29 15:51:01 -07005901#endif
Leo Chang98726762016-10-28 11:07:18 -07005902};
Yun Parkb4f591d2017-03-29 15:51:01 -07005903#endif
Leo Chang98726762016-10-28 11:07:18 -07005904
Leo Chang98726762016-10-28 11:07:18 -07005905static struct cdp_bus_ops ol_ops_bus = {
5906 .bus_suspend = ol_txrx_bus_suspend,
5907 .bus_resume = ol_txrx_bus_resume
5908};
5909
5910static struct cdp_ocb_ops ol_ops_ocb = {
5911 .set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
5912 .get_ocb_chan_info = ol_txrx_get_ocb_chan_info
5913};
5914
5915static struct cdp_throttle_ops ol_ops_throttle = {
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005916#ifdef QCA_SUPPORT_TX_THROTTLE
Leo Chang98726762016-10-28 11:07:18 -07005917 .throttle_init_period = ol_tx_throttle_init_period,
5918 .throttle_set_level = ol_tx_throttle_set_level
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005919#endif /* QCA_SUPPORT_TX_THROTTLE */
Leo Chang98726762016-10-28 11:07:18 -07005920};
5921
5922static struct cdp_mob_stats_ops ol_ops_mob_stats = {
Leo Chang98726762016-10-28 11:07:18 -07005923 .clear_stats = ol_txrx_clear_stats,
5924 .stats = ol_txrx_stats
5925};
5926
5927static struct cdp_cfg_ops ol_ops_cfg = {
5928 .set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
5929 .set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
5930 .cfg_attach = ol_pdev_cfg_attach,
5931 .vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
5932 .is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
5933 .tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
5934 .is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
5935 .set_flow_control_parameters =
5936 ol_txrx_wrapper_set_flow_control_parameters,
5937 .set_flow_steering = ol_set_cfg_flow_steering,
Yu Wang66a250b2017-07-19 11:46:40 +08005938 .set_ptp_rx_opt_enabled = ol_set_cfg_ptp_rx_opt_enabled,
Leo Chang98726762016-10-28 11:07:18 -07005939};
5940
5941static struct cdp_peer_ops ol_ops_peer = {
5942 .register_peer = ol_txrx_wrapper_register_peer,
5943 .clear_peer = ol_txrx_clear_peer,
Mohit Khannab7bec722017-11-10 11:43:44 -08005944 .peer_get_ref_by_addr = ol_txrx_wrapper_peer_get_ref_by_addr,
5945 .peer_release_ref = ol_txrx_wrapper_peer_release_ref,
Leo Chang98726762016-10-28 11:07:18 -07005946 .find_peer_by_addr = ol_txrx_wrapper_find_peer_by_addr,
5947 .find_peer_by_addr_and_vdev = ol_txrx_find_peer_by_addr_and_vdev,
5948 .local_peer_id = ol_txrx_local_peer_id,
5949 .peer_find_by_local_id = ol_txrx_wrapper_peer_find_by_local_id,
5950 .peer_state_update = ol_txrx_wrapper_peer_state_update,
5951 .get_vdevid = ol_txrx_get_vdevid,
5952 .get_vdev_by_sta_id = ol_txrx_get_vdev_by_sta_id,
5953 .register_ocb_peer = ol_txrx_register_ocb_peer,
5954 .peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
5955 .get_peer_state = ol_txrx_get_peer_state,
5956 .get_vdev_for_peer = ol_txrx_get_vdev_for_peer,
5957 .update_ibss_add_peer_num_of_vdev =
5958 ol_txrx_update_ibss_add_peer_num_of_vdev,
5959 .remove_peers_for_vdev = ol_txrx_remove_peers_for_vdev,
5960 .remove_peers_for_vdev_no_lock = ol_txrx_remove_peers_for_vdev_no_lock,
Yu Wang053d3e72017-02-08 18:48:24 +08005961#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
Leo Chang98726762016-10-28 11:07:18 -07005962 .copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
5963 .add_last_real_peer = ol_txrx_add_last_real_peer,
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005964 .is_vdev_restore_last_peer = is_vdev_restore_last_peer,
5965 .update_last_real_peer = ol_txrx_update_last_real_peer,
5966#endif /* CONFIG_HL_SUPPORT */
Leo Chang98726762016-10-28 11:07:18 -07005967 .last_assoc_received = ol_txrx_last_assoc_received,
5968 .last_disassoc_received = ol_txrx_last_disassoc_received,
5969 .last_deauth_received = ol_txrx_last_deauth_received,
Leo Chang98726762016-10-28 11:07:18 -07005970 .peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
5971};
5972
5973static struct cdp_tx_delay_ops ol_ops_delay = {
5974#ifdef QCA_COMPUTE_TX_DELAY
5975 .tx_delay = ol_tx_delay,
5976 .tx_delay_hist = ol_tx_delay_hist,
5977 .tx_packet_count = ol_tx_packet_count,
5978 .tx_set_compute_interval = ol_tx_set_compute_interval
5979#endif /* QCA_COMPUTE_TX_DELAY */
5980};
5981
5982static struct cdp_pmf_ops ol_ops_pmf = {
5983 .get_pn_info = ol_txrx_get_pn_info
5984};
5985
Leo Chang98726762016-10-28 11:07:18 -07005986static struct cdp_ctrl_ops ol_ops_ctrl = {
Hanumanth Reddy Pothula855f7ef2018-02-13 18:32:05 +05305987 .txrx_get_pldev = ol_get_pldev,
Venkata Sharath Chandra Manchala29965172018-01-18 14:17:29 -08005988 .txrx_wdi_event_sub = wdi_event_sub,
5989 .txrx_wdi_event_unsub = wdi_event_unsub,
Leo Chang98726762016-10-28 11:07:18 -07005990};
5991
Hanumanth Reddy Pothula855f7ef2018-02-13 18:32:05 +05305992/* WIN platform specific structures */
Leo Chang98726762016-10-28 11:07:18 -07005993static struct cdp_me_ops ol_ops_me = {
5994 /* EMPTY FOR MCL */
5995};
5996
5997static struct cdp_mon_ops ol_ops_mon = {
5998 /* EMPTY FOR MCL */
5999};
6000
6001static struct cdp_host_stats_ops ol_ops_host_stats = {
6002 /* EMPTY FOR MCL */
6003};
6004
6005static struct cdp_wds_ops ol_ops_wds = {
6006 /* EMPTY FOR MCL */
6007};
6008
6009static struct cdp_raw_ops ol_ops_raw = {
6010 /* EMPTY FOR MCL */
6011};
6012
6013static struct cdp_ops ol_txrx_ops = {
6014 .cmn_drv_ops = &ol_ops_cmn,
6015 .ctrl_ops = &ol_ops_ctrl,
6016 .me_ops = &ol_ops_me,
6017 .mon_ops = &ol_ops_mon,
6018 .host_stats_ops = &ol_ops_host_stats,
6019 .wds_ops = &ol_ops_wds,
6020 .raw_ops = &ol_ops_raw,
6021 .misc_ops = &ol_ops_misc,
6022 .cfg_ops = &ol_ops_cfg,
6023 .flowctl_ops = &ol_ops_flowctl,
6024 .l_flowctl_ops = &ol_ops_l_flowctl,
Yun Parkb4f591d2017-03-29 15:51:01 -07006025#ifdef IPA_OFFLOAD
Leo Chang98726762016-10-28 11:07:18 -07006026 .ipa_ops = &ol_ops_ipa,
Yun Parkb4f591d2017-03-29 15:51:01 -07006027#endif
Leo Chang98726762016-10-28 11:07:18 -07006028 .bus_ops = &ol_ops_bus,
6029 .ocb_ops = &ol_ops_ocb,
6030 .peer_ops = &ol_ops_peer,
6031 .throttle_ops = &ol_ops_throttle,
6032 .mob_stats_ops = &ol_ops_mob_stats,
6033 .delay_ops = &ol_ops_delay,
6034 .pmf_ops = &ol_ops_pmf
6035};
6036
Jeff Johnson02c37b42017-01-10 14:49:24 -08006037/*
6038 * Local prototype added to temporarily address warning caused by
6039 * -Wmissing-prototypes. A more correct solution, namely to expose
6040 * a prototype in an appropriate header file, will come later.
6041 */
6042struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
6043 struct ol_if_ops *dp_ol_if_ops);
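/**
 * ol_txrx_soc_attach() - allocate the CDP SOC handle for the legacy OL path
 * @scn_handle: opaque platform handle (not used by this implementation)
 * @dp_ol_if_ops: control path callbacks (not used by this implementation)
 *
 * Return: pointer to the allocated cdp_soc_t with ops set to ol_txrx_ops,
 *	or NULL if the allocation fails
 */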
6044struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
6045 struct ol_if_ops *dp_ol_if_ops)
Leo Chang98726762016-10-28 11:07:18 -07006046{
6047 struct cdp_soc_t *soc = qdf_mem_malloc(sizeof(struct cdp_soc_t));
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07006048
Leo Chang98726762016-10-28 11:07:18 -07006049 if (!soc) {
6050 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6051 "%s: OL SOC memory allocation failed\n", __func__);
6052 return NULL;
6053 }
6054
6055 soc->ops = &ol_txrx_ops;
6056 return soc;
6057}
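/*
 * Illustrative usage only (the actual call site lives in platform glue code
 * and may differ): a caller typically attaches the SOC once at init, e.g.
 *
 *	struct cdp_soc_t *soc = ol_txrx_soc_attach(scn_handle, ol_if_ops);
 *	if (!soc)
 *		return QDF_STATUS_E_NOMEM;
 *
 * and later releases it via soc->ops->cmn_drv_ops->txrx_soc_detach(soc).
 */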
6058
6059