/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>              /* uint32_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc,free */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <qdf_debugfs.h>

/* header files for utilities */
#include <cds_queue.h>          /* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_if_athvar.h>

/* header files for HTT API */
#include <ol_htt_api.h>
#include <ol_htt_tx_api.h>

/* header files for our own APIs */
#include <ol_txrx_api.h>
#include <ol_txrx_dbg.h>
#include <cdp_txrx_ocb.h>
#include <ol_txrx_ctrl_api.h>
#include <cdp_txrx_stats.h>
#include <ol_txrx_osif_api.h>
/* header files for our internal definitions */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT, etc. */
#include <wdi_event.h>          /* WDI events */
#include <ol_tx.h>              /* ol_tx_ll */
#include <ol_rx.h>              /* ol_rx_deliver */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
#include <ol_rx_pn.h>           /* ol_rx_pn_check, etc. */
#include <ol_rx_fwd.h>          /* ol_rx_fwd_check, etc. */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
#include <ol_rx_reorder.h>
#include <ol_tx_send.h>         /* ol_tx_discard_target_frms */
#include <ol_tx_desc.h>         /* ol_tx_desc_frame_free */
#include <ol_tx_queue.h>
#include <ol_tx_sched.h>        /* ol_tx_sched_attach, etc. */
#include <ol_txrx.h>
#include <ol_txrx_types.h>
#include <cdp_txrx_flow_ctrl_legacy.h>
#include <cdp_txrx_bus.h>
#include <cdp_txrx_ipa.h>
#include <cdp_txrx_pmf.h>
#include "wma.h"
#include "hif.h"
#include "hif_main.h"
#include <cdp_txrx_peer_ops.h>
#ifndef REMOVE_PKT_LOG
#include "pktlog_ac.h"
#endif
#include <wlan_policy_mgr_api.h>
#include "epping_main.h"
#include <a_types.h>
#include <cdp_txrx_handle.h>
#include "wlan_qct_sys.h"

#include <htt_internal.h>
#include <ol_txrx_ipa.h>
#include "wlan_roam_debug.h"

#define DPT_DEBUGFS_PERMS (QDF_FILE_USR_READ | \
                           QDF_FILE_USR_WRITE | \
                           QDF_FILE_GRP_READ | \
                           QDF_FILE_OTH_READ)

#define DPT_DEBUGFS_NUMBER_BASE 10
/**
 * enum dpt_set_param_debugfs - dpt set params
 * @DPT_SET_PARAM_PROTO_BITMAP: set proto bitmap
 * @DPT_SET_PARAM_NR_RECORDS: set num of records
 * @DPT_SET_PARAM_VERBOSITY: set verbosity
 */
enum dpt_set_param_debugfs {
        DPT_SET_PARAM_PROTO_BITMAP = 1,
        DPT_SET_PARAM_NR_RECORDS = 2,
        DPT_SET_PARAM_VERBOSITY = 3,
        DPT_SET_PARAM_MAX,
};
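
/*
 * Illustrative note (not part of the original sources): the debugfs "set"
 * command parsed by ol_txrx_conv_str_to_int_debugfs() below expects the three
 * parameters above in this order, separated by single spaces.  For example,
 * writing the string "15 100 3" would be interpreted (in base
 * DPT_DEBUGFS_NUMBER_BASE, i.e. decimal) as proto_bitmap = 15,
 * nr_records = 100 and verbosity = 3.
 */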

QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
                                     uint8_t *peer_mac,
                                     enum ol_txrx_peer_state state);
static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *vdev,
                                        bool val);
int ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
extern void
ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
                      struct ol_tx_wmm_param_t wmm_param);

extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
                                uint64_t **last_pn, uint32_t **rmf_pn_replays);

/* thresh for peer's cached buf queue beyond which the elements are dropped */
#define OL_TXRX_CACHED_BUFQ_THRESH 128

/**
 * ol_tx_mark_first_wakeup_packet() - set flag to indicate whether the
 * firmware supports marking the first packet after WoW wakeup
 * @value: 1 for enabled / 0 for disabled
 *
 * Return: None
 */
static void ol_tx_mark_first_wakeup_packet(uint8_t value)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pdev) {
                ol_txrx_err("%s: pdev is NULL\n", __func__);
                return;
        }

        htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
}

/**
 * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate whether mgmt
 * over wmi is enabled or not.
 * @value: 1 for enabled / 0 for disabled
 *
 * Return: None
 */
void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pdev) {
                qdf_print("%s: pdev is NULL\n", __func__);
                return;
        }
        pdev->is_mgmt_over_wmi_enabled = value;
}

/**
 * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
 *
 * Return: is_mgmt_over_wmi_enabled
 */
uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pdev) {
                qdf_print("%s: pdev is NULL\n", __func__);
                return 0;
        }
        return pdev->is_mgmt_over_wmi_enabled;
}

#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
static void *
ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
                struct cdp_vdev *pvdev, uint8_t *peer_addr, uint8_t *peer_id)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
        struct ol_txrx_peer_t *peer;

        peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
        if (!peer)
                return NULL;
        *peer_id = peer->local_id;
        ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
        return peer;
}

static QDF_STATUS ol_txrx_get_vdevid(void *ppeer, uint8_t *vdev_id)
{
        struct ol_txrx_peer_t *peer = ppeer;

        if (!peer) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "peer argument is null!!");
                return QDF_STATUS_E_FAILURE;
        }

        *vdev_id = peer->vdev->vdev_id;
        return QDF_STATUS_SUCCESS;
}

static struct cdp_vdev *ol_txrx_get_vdev_by_sta_id(struct cdp_pdev *ppdev,
                                                   uint8_t sta_id)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
        struct ol_txrx_peer_t *peer = NULL;
        ol_txrx_vdev_handle vdev;

        if (!pdev) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
                          "PDEV not found for sta_id [%d]", sta_id);
                return NULL;
        }

        peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev, sta_id,
                                                PEER_DEBUG_ID_OL_INTERNAL);
        if (!peer) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
                          "PEER [%d] not found", sta_id);
                return NULL;
        }

        vdev = peer->vdev;
        ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);

        return (struct cdp_vdev *)vdev;
}

/**
 * ol_txrx_find_peer_by_addr() - find peer via peer mac addr and peer_id
 * @ppdev: pointer of type cdp_pdev
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds a peer with given mac address and returns its peer_id.
 * Note that this function does not increment the peer->ref_cnt.
 * This means that the peer may be deleted in some other parallel context after
 * it has been found.
 *
 * Return: peer handle if peer is found, NULL if peer is not found.
 */
void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
                                uint8_t *peer_addr,
                                uint8_t *peer_id)
{
        struct ol_txrx_peer_t *peer;
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

        peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
                                                   PEER_DEBUG_ID_OL_INTERNAL);
        if (!peer)
                return NULL;
        *peer_id = peer->local_id;
        ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
        return peer;
}
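
/*
 * Illustrative caller sketch (not part of the original sources): because
 * ol_txrx_find_peer_by_addr() drops its temporary reference before returning,
 * a hypothetical caller should only use the result as an existence check and
 * as the source of the local peer_id, e.g.:
 *
 *      uint8_t local_id;
 *
 *      if (ol_txrx_find_peer_by_addr(ppdev, peer_mac, &local_id))
 *              record_local_id(local_id);      // hypothetical helper
 *
 * A caller that needs to dereference the peer afterwards should use
 * ol_txrx_peer_get_ref_by_addr() instead, so that a reference is held.
 */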

/**
 * ol_txrx_peer_get_ref_by_addr() - get peer ref via peer mac addr and peer_id
 * @pdev: pointer of type ol_txrx_pdev_handle
 * @peer_addr: peer mac addr
 * @peer_id: pointer to fill in the value of peer->local_id for caller
 *
 * This function finds the peer with given mac address and returns its peer_id.
 * Note that this function increments the peer->ref_cnt.
 * This makes sure that peer will be valid. This also means the caller needs to
 * call the corresponding API - ol_txrx_peer_release_ref to delete the peer
 * reference.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr, peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
                                                 u8 *peer_addr,
                                                 u8 *peer_id,
                                                 enum peer_debug_id_type dbg_id)
{
        struct ol_txrx_peer_t *peer;

        peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
                                                   dbg_id);
        if (!peer)
                return NULL;
        *peer_id = peer->local_id;
        return peer;
}

static uint16_t ol_txrx_local_peer_id(void *ppeer)
{
        ol_txrx_peer_handle peer = ppeer;

        return peer->local_id;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @details
 * The control SW typically uses the txrx peer handle to refer to the peer.
 * In unusual circumstances, if it is infeasible for the control SW to maintain
 * the txrx peer handle but it can maintain a small integer local peer ID,
 * this function allows the peer handle to be retrieved, based on the local
 * peer ID.
 *
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @return handle to the txrx peer object
 */
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *ppdev,
                              uint8_t local_peer_id)
{
        struct ol_txrx_peer_t *peer;
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

        if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
            (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
                return NULL;
        }

        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        peer = pdev->local_peer_ids.map[local_peer_id];
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
        return peer;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @dbg_id - debug_id to track caller
 * @return handle to the txrx peer object
 * @details
 * The control SW typically uses the txrx peer handle to refer to the peer.
 * In unusual circumstances, if it is infeasible for the control SW to maintain
 * the txrx peer handle but it can maintain a small integer local peer ID,
 * this function allows the peer handle to be retrieved, based on the local
 * peer ID.
 *
 * Note that this function increments the peer->ref_cnt.
 * This makes sure that peer will be valid. This also means the caller needs to
 * call the corresponding API - ol_txrx_peer_release_ref - to release the
 * reference.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_local_id(pdev, local_peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
                                 uint8_t local_peer_id,
                                 enum peer_debug_id_type dbg_id)
{
        struct ol_txrx_peer_t *peer = NULL;
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

        if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
            (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
                return NULL;
        }

        qdf_spin_lock_bh(&pdev->peer_ref_mutex);
        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        peer = pdev->local_peer_ids.map[local_peer_id];
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
        if (peer && peer->valid)
                ol_txrx_peer_get_ref(peer, dbg_id);
        else
                peer = NULL;
        qdf_spin_unlock_bh(&pdev->peer_ref_mutex);

        return peer;
}

static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
{
        int i;

        /* point the freelist to the first ID */
        pdev->local_peer_ids.freelist = 0;

        /* link each ID to the next one */
        for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
                pdev->local_peer_ids.pool[i] = i + 1;
                pdev->local_peer_ids.map[i] = NULL;
        }

        /* link the last ID to itself, to mark the end of the list */
        i = OL_TXRX_NUM_LOCAL_PEER_IDS;
        pdev->local_peer_ids.pool[i] = i;

        qdf_spinlock_create(&pdev->local_peer_ids.lock);
}
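
/*
 * Illustrative note (not part of the original sources): assuming, purely for
 * the example, OL_TXRX_NUM_LOCAL_PEER_IDS == 4, the init above leaves
 * freelist = 0 and pool[] = {1, 2, 3, 4, 4}: each entry points to the next
 * free ID and the final entry points to itself as the end-of-list marker.
 * The first ol_txrx_local_peer_id_alloc() call would then hand out
 * local_id 0, advance freelist to pool[0] == 1, and record the peer in map[0].
 */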

static void
ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
                            struct ol_txrx_peer_t *peer)
{
        int i;

        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        i = pdev->local_peer_ids.freelist;
        if (pdev->local_peer_ids.pool[i] == i) {
                /* the list is empty, except for the list-end marker */
                peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
        } else {
                /* take the head ID and advance the freelist */
                peer->local_id = i;
                pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
                pdev->local_peer_ids.map[i] = peer;
        }
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
                           struct ol_txrx_peer_t *peer)
{
        int i = peer->local_id;

        if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
            (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
                return;
        }
        /* put this ID on the head of the freelist */
        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
        pdev->local_peer_ids.freelist = i;
        pdev->local_peer_ids.map[i] = NULL;
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
{
        qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
}

#else
#define ol_txrx_local_peer_id_pool_init(pdev)   /* no-op */
#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
#define ol_txrx_local_peer_id_free(pdev, peer)  /* no-op */
#define ol_txrx_local_peer_id_cleanup(pdev)     /* no-op */
#endif

#if defined(CONFIG_DP_TRACE) && defined(WLAN_DEBUGFS)
/**
 * ol_txrx_read_dpt_buff_debugfs() - read dp trace buffer
 * @file: file to read
 * @arg: pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_read_dpt_buff_debugfs(qdf_debugfs_file_t file,
                                                void *arg)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)arg;
        uint32_t i = 0;
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID)
                return QDF_STATUS_E_INVAL;
        else if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE) {
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
                return QDF_STATUS_SUCCESS;
        }

        i = qdf_dpt_get_curr_pos_debugfs(file, pdev->state);
        status = qdf_dpt_dump_stats_debugfs(file, i);
        if (status == QDF_STATUS_E_FAILURE)
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS;
        else if (status == QDF_STATUS_SUCCESS)
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE;

        return status;
}

/**
 * ol_txrx_conv_str_to_int_debugfs() - convert string to int
 * @buf: buffer containing string
 * @len: buffer len
 * @proto_bitmap: defines the protocol to be tracked
 * @nr_records: defines the nth packet which is traced
 * @verbosity: defines the verbosity level
 *
 * This function expects the char buffer to be null terminated.
 * Otherwise the results may be unexpected.
 *
 * Return: 0 on success
 */
static int ol_txrx_conv_str_to_int_debugfs(char *buf, qdf_size_t len,
                                           int *proto_bitmap,
                                           int *nr_records,
                                           int *verbosity)
{
        int num_value = DPT_SET_PARAM_PROTO_BITMAP;
        int ret, param_value = 0;
        char *buf_param = buf;
        int i;

        for (i = 1; i < DPT_SET_PARAM_MAX; i++) {
                /* Loop until a space is reached, since kstrtoint parses up to
                 * the null character. Replace the space with a null character
                 * so each value can be read.
                 * Terminate the loop either at the null character or when
                 * len reaches 0.
                 */
                while (*buf && len) {
                        if (*buf == ' ') {
                                *buf = '\0';
                                buf++;
                                len--;
                                break;
                        }
                        buf++;
                        len--;
                }
                /* get the parameter */
                ret = qdf_kstrtoint(buf_param,
                                    DPT_DEBUGFS_NUMBER_BASE,
                                    &param_value);
                if (ret) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX,
                                  QDF_TRACE_LEVEL_ERROR,
                                  "%s: Error while parsing buffer. ret %d",
                                  __func__, ret);
                        return ret;
                }
                switch (num_value) {
                case DPT_SET_PARAM_PROTO_BITMAP:
                        *proto_bitmap = param_value;
                        break;
                case DPT_SET_PARAM_NR_RECORDS:
                        *nr_records = param_value;
                        break;
                case DPT_SET_PARAM_VERBOSITY:
                        *verbosity = param_value;
                        break;
                default:
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                                  "%s %d: :Set command needs exactly 3 arguments in format <proto_bitmap> <number of record> <Verbosity>.",
                                  __func__, __LINE__);
                        break;
                }
                num_value++;
                /* buf_param should now point to the next param value. */
                buf_param = buf;
        }

        /* buf is not yet NULL implies more than 3 params are passed. */
        if (*buf) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s %d: :Set command needs exactly 3 arguments in format <proto_bitmap> <number of record> <Verbosity>.",
                          __func__, __LINE__);
                return -EINVAL;
        }
        return 0;
}
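
/*
 * Illustrative example (not part of the original sources): given the
 * null-terminated, writable buffer "4 10 2" (len = 6), the parser above
 * returns 0 and sets *proto_bitmap = 4, *nr_records = 10, *verbosity = 2.
 * A hypothetical caller mirroring ol_txrx_write_dpt_buff_debugfs() could do:
 *
 *      int bitmap, records, verbosity;
 *      char cmd[] = "4 10 2";
 *
 *      if (!ol_txrx_conv_str_to_int_debugfs(cmd, sizeof(cmd) - 1,
 *                                           &bitmap, &records, &verbosity))
 *              qdf_dpt_set_value_debugfs(bitmap, records, verbosity);
 */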

/**
 * ol_txrx_write_dpt_buff_debugfs() - set dp trace parameters
 * @priv: pdev object
 * @buf: buff to get value for dpt parameters
 * @len: buf length
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_write_dpt_buff_debugfs(void *priv,
                                                 const char *buf,
                                                 qdf_size_t len)
{
        int ret;
        int proto_bitmap = 0;
        int nr_records = 0;
        int verbosity = 0;
        char *buf1 = NULL;

        if (!buf || !len) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: null buffer or len. len %u",
                          __func__, (uint8_t)len);
                return QDF_STATUS_E_FAULT;
        }

        buf1 = (char *)qdf_mem_malloc(len);
        if (!buf1) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: qdf_mem_malloc failure",
                          __func__);
                return QDF_STATUS_E_FAULT;
        }
        qdf_mem_copy(buf1, buf, len);
        ret = ol_txrx_conv_str_to_int_debugfs(buf1, len, &proto_bitmap,
                                              &nr_records, &verbosity);
        if (ret) {
                qdf_mem_free(buf1);
                return QDF_STATUS_E_INVAL;
        }

        qdf_dpt_set_value_debugfs(proto_bitmap, nr_records, verbosity);
        qdf_mem_free(buf1);
        return QDF_STATUS_SUCCESS;
}

static int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
        pdev->dpt_debugfs_fops.show = ol_txrx_read_dpt_buff_debugfs;
        pdev->dpt_debugfs_fops.write = ol_txrx_write_dpt_buff_debugfs;
        pdev->dpt_debugfs_fops.priv = pdev;

        pdev->dpt_stats_log_dir = qdf_debugfs_create_dir("dpt_stats", NULL);

        if (!pdev->dpt_stats_log_dir) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: error while creating debugfs dir for %s",
                          __func__, "dpt_stats");
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
                return -EBUSY;
        }

        if (!qdf_debugfs_create_file("dump_set_dpt_logs", DPT_DEBUGFS_PERMS,
                                     pdev->dpt_stats_log_dir,
                                     &pdev->dpt_debugfs_fops)) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s: debug Entry creation failed!",
                          __func__);
                pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
                return -EBUSY;
        }

        pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
        return 0;
}

static void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
        qdf_debugfs_remove_dir_recursive(pdev->dpt_stats_log_dir);
}
#else
static inline int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
        return 0;
}

static inline void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
}
#endif

/**
 * ol_txrx_pdev_attach() - allocate txrx pdev
 * @soc: soc handle
 * @ctrl_pdev: cfg pdev
 * @htc_pdev: HTC pdev
 * @osdev: os dev
 * @pdev_id: pdev identifier
 *
 * Return: txrx pdev handle
 *         NULL for failure
 */
static struct cdp_pdev *
ol_txrx_pdev_attach(ol_txrx_soc_handle soc,
                    struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
                    HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
{
        struct ol_txrx_pdev_t *pdev;
        struct cdp_cfg *cfg_pdev = (struct cdp_cfg *)ctrl_pdev;
        int i, tid;

        pdev = qdf_mem_malloc(sizeof(*pdev));
        if (!pdev)
                goto fail0;

        /* init LL/HL cfg here */
        pdev->cfg.is_high_latency = ol_cfg_is_high_latency(cfg_pdev);
        /*
         * Credit reporting through HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND
         * enabled or not.
         */
        pdev->cfg.credit_update_enabled =
                ol_cfg_is_credit_update_enabled(cfg_pdev);

        /* Explicitly request TX Completions from FW */
        pdev->cfg.request_tx_comp = cds_is_ptp_rx_opt_enabled() ||
                cds_is_packet_log_enabled();

        pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(cfg_pdev);

        /* store provided params */
        pdev->ctrl_pdev = cfg_pdev;
        pdev->osdev = osdev;

        for (i = 0; i < htt_num_sec_types; i++)
                pdev->sec_types[i] = (enum ol_sec_type)i;

        TXRX_STATS_INIT(pdev);
        ol_txrx_tso_stats_init(pdev);
        ol_txrx_fw_stats_desc_pool_init(pdev, FW_STATS_DESC_POOL_SIZE);

        TAILQ_INIT(&pdev->vdev_list);

        TAILQ_INIT(&pdev->req_list);
        pdev->req_list_depth = 0;
        qdf_spinlock_create(&pdev->req_list_spinlock);
        qdf_spinlock_create(&pdev->tx_mutex);

        /* do initial set up of the peer ID -> peer object lookup map */
        if (ol_txrx_peer_find_attach(pdev))
                goto fail1;

        /* initialize the counter of the target's tx buffer availability */
        qdf_atomic_init(&pdev->target_tx_credit);
        qdf_atomic_init(&pdev->orig_target_tx_credit);

        if (ol_cfg_is_high_latency(cfg_pdev)) {
                qdf_spinlock_create(&pdev->tx_queue_spinlock);
                pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
                if (pdev->tx_sched.scheduler == NULL)
                        goto fail2;
        }
        ol_txrx_pdev_txq_log_init(pdev);
        ol_txrx_pdev_grp_stats_init(pdev);

        pdev->htt_pdev =
                htt_pdev_alloc(pdev, cfg_pdev, htc_pdev, osdev);
        if (!pdev->htt_pdev)
                goto fail3;

        htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
                                          ol_rx_pkt_dump_call);

        /*
         * Init the tid --> category table.
         * Regular tids (0-15) map to their AC.
         * Extension tids get their own categories.
         */
        for (tid = 0; tid < OL_TX_NUM_QOS_TIDS; tid++) {
                int ac = TXRX_TID_TO_WMM_AC(tid);

                pdev->tid_to_ac[tid] = ac;
        }
        pdev->tid_to_ac[OL_TX_NON_QOS_TID] =
                OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;
        pdev->tid_to_ac[OL_TX_MGMT_TID] =
                OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
        pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST] =
                OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
        pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
                OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;

        ol_txrx_debugfs_init(pdev);

        return (struct cdp_pdev *)pdev;

fail3:
        ol_txrx_peer_find_detach(pdev);

fail2:
        if (ol_cfg_is_high_latency(cfg_pdev))
                qdf_spinlock_destroy(&pdev->tx_queue_spinlock);

fail1:
        qdf_spinlock_destroy(&pdev->tx_mutex);
        ol_txrx_tso_stats_deinit(pdev);
        ol_txrx_fw_stats_desc_pool_deinit(pdev);
        qdf_mem_free(pdev);

fail0:
        return NULL;
}

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
/**
 * htt_pkt_log_init() - API to initialize packet log
 * @ppdev: pdev handle
 * @scn: HIF context
 *
 * Return: void
 */
void htt_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
        struct ol_txrx_pdev_t *handle = (struct ol_txrx_pdev_t *)ppdev;

        if (handle->pkt_log_init)
                return;

        if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
            !QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
                pktlog_sethandle(&handle->pl_dev, scn);
                pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
                if (pktlogmod_init(scn))
                        qdf_print("%s: pktlogmod_init failed", __func__);
                else
                        handle->pkt_log_init = true;
        }
}

/**
 * htt_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 *
 * Return: void
 */
static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
{
        if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
            !QDF_IS_EPPING_ENABLED(cds_get_conparam()) &&
            handle->pkt_log_init) {
                pktlogmod_exit(handle);
                handle->pkt_log_init = false;
        }
}

#else
void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *ol_sc) { }
static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
#endif

/**
 * ol_txrx_pdev_post_attach() - attach txrx pdev
 * @ppdev: txrx pdev
 *
 * Return: 0 for success
 */
int
ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
        uint16_t i;
        uint16_t fail_idx = 0;
        int ret = 0;
        uint16_t desc_pool_size;
        struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);

        uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
        union ol_tx_desc_list_elem_t *c_element;
        unsigned int sig_bit;
        uint16_t desc_per_page;

        if (!osc) {
                ret = -EINVAL;
                goto ol_attach_fail;
        }

        /*
         * For LL, limit the number of host's tx descriptors to match
         * the number of target FW tx descriptors.
         * This simplifies the FW, by ensuring the host will never
         * download more tx descriptors than the target has space for.
         * The FW will drop/free low-priority tx descriptors when it
         * starts to run low, so that in theory the host should never
         * run out of tx descriptors.
         */

        /*
         * LL - initialize the target credit ourselves.
         * HL - wait for a HTT target credit initialization
         * during htt_attach.
         */
        desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
        ol_tx_init_pdev(pdev);

        ol_tx_desc_dup_detect_init(pdev, desc_pool_size);

        ol_tx_setup_fastpath_ce_handles(osc, pdev);

        ret = htt_attach(pdev->htt_pdev, desc_pool_size);
        if (ret)
                goto htt_attach_fail;

        /* Attach micro controller data path offload resource */
        if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
                ret = htt_ipa_uc_attach(pdev->htt_pdev);
                if (ret)
                        goto uc_attach_fail;
        }

        /* Calculate single element reserved size power of 2 */
        pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
        qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
                pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
        if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
            (NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "Page alloc fail");
                ret = -ENOMEM;
                goto page_alloc_fail;
        }
        desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
        pdev->tx_desc.offset_filter = desc_per_page - 1;
        /* Calculate page divider to find page number */
        sig_bit = 0;
        while (desc_per_page) {
                sig_bit++;
                desc_per_page = desc_per_page >> 1;
        }
        pdev->tx_desc.page_divider = (sig_bit - 1);
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
                pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
                desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
                pdev->tx_desc.desc_pages.num_element_per_page);

        /*
         * Each SW tx desc (used only within the tx datapath SW) has a
         * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
         * Go ahead and allocate the HTT tx desc and link it with the SW tx
         * desc now, to avoid doing it during time-critical transmit.
         */
        pdev->tx_desc.pool_size = desc_pool_size;
        pdev->tx_desc.freelist =
                (union ol_tx_desc_list_elem_t *)
                (*pdev->tx_desc.desc_pages.cacheable_pages);
        c_element = pdev->tx_desc.freelist;
        for (i = 0; i < desc_pool_size; i++) {
                void *htt_tx_desc;
                void *htt_frag_desc = NULL;
                qdf_dma_addr_t frag_paddr = 0;
                qdf_dma_addr_t paddr;

                if (i == (desc_pool_size - 1))
                        c_element->next = NULL;
                else
                        c_element->next = (union ol_tx_desc_list_elem_t *)
                                ol_tx_desc_find(pdev, i + 1);

                htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
                if (!htt_tx_desc) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
                                  "%s: failed to alloc HTT tx desc (%d of %d)",
                                  __func__, i, desc_pool_size);
                        fail_idx = i;
                        ret = -ENOMEM;
                        goto desc_alloc_fail;
                }

                c_element->tx_desc.htt_tx_desc = htt_tx_desc;
                c_element->tx_desc.htt_tx_desc_paddr = paddr;
                ret = htt_tx_frag_alloc(pdev->htt_pdev,
                                        i, &frag_paddr, &htt_frag_desc);
                if (ret) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                                  "%s: failed to alloc HTT frag dsc (%d/%d)",
                                  __func__, i, desc_pool_size);
                        /* Is there a leak here, is this handling correct? */
                        fail_idx = i;
                        goto desc_alloc_fail;
                }
                if (!ret && htt_frag_desc) {
                        /*
                         * Initialize the first 6 words (TSO flags)
                         * of the frag descriptor
                         */
                        memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
                        c_element->tx_desc.htt_frag_desc = htt_frag_desc;
                        c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
                }
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
                c_element->tx_desc.pkt_type = 0xff;
#ifdef QCA_COMPUTE_TX_DELAY
                c_element->tx_desc.entry_timestamp_ticks =
                        0xffffffff;
#endif
#endif
                c_element->tx_desc.id = i;
                qdf_atomic_init(&c_element->tx_desc.ref_cnt);
                c_element = c_element->next;
                fail_idx = i;
        }

        /* link SW tx descs into a freelist */
        pdev->tx_desc.num_free = desc_pool_size;
        ol_txrx_dbg(
                "%s first tx_desc:0x%pK Last tx desc:0x%pK\n", __func__,
                (uint32_t *) pdev->tx_desc.freelist,
                (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));

        /* check what format of frames are expected to be delivered by the OS */
        pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
        if (pdev->frame_format == wlan_frm_fmt_native_wifi)
                pdev->htt_pkt_type = htt_pkt_type_native_wifi;
        else if (pdev->frame_format == wlan_frm_fmt_802_3) {
                if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
                        pdev->htt_pkt_type = htt_pkt_type_eth2;
                else
                        pdev->htt_pkt_type = htt_pkt_type_ethernet;
        } else {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "%s Invalid standard frame type: %d",
                          __func__, pdev->frame_format);
                ret = -EINVAL;
                goto control_init_fail;
        }

        /* setup the global rx defrag waitlist */
        TAILQ_INIT(&pdev->rx.defrag.waitlist);

        /* configure where defrag timeout and duplicate detection is handled */
        pdev->rx.flags.defrag_timeout_check =
                pdev->rx.flags.dup_check =
                ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
1022 /* Need to revisit this part. Currently,hardcode to riva's caps */
        pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
        pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
        /*
         * The Riva HW de-aggregate doesn't have capability to generate 802.11
         * header for non-first subframe of A-MSDU.
         */
        pdev->sw_subfrm_hdr_recovery_enable = 1;
        /*
         * The Riva HW doesn't have the capability to set Protected Frame bit
         * in the MAC header for encrypted data frame.
         */
        pdev->sw_pf_proc_enable = 1;

        if (pdev->frame_format == wlan_frm_fmt_802_3) {
                /*
                 * sw llc process is only needed in
                 * 802.3 to 802.11 transform case
                 */
                pdev->sw_tx_llc_proc_enable = 1;
                pdev->sw_rx_llc_proc_enable = 1;
        } else {
                pdev->sw_tx_llc_proc_enable = 0;
                pdev->sw_rx_llc_proc_enable = 0;
        }

        switch (pdev->frame_format) {
        case wlan_frm_fmt_raw:
                pdev->sw_tx_encap =
                        pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
                        ? 0 : 1;
                pdev->sw_rx_decap =
                        pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
                        ? 0 : 1;
                break;
        case wlan_frm_fmt_native_wifi:
                pdev->sw_tx_encap =
                        pdev->
                        target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
                        ? 0 : 1;
                pdev->sw_rx_decap =
                        pdev->
                        target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
                        ? 0 : 1;
                break;
        case wlan_frm_fmt_802_3:
                pdev->sw_tx_encap =
                        pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
                        ? 0 : 1;
                pdev->sw_rx_decap =
                        pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
                        ? 0 : 1;
                break;
        default:
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
                          pdev->frame_format,
                          pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
                ret = -EINVAL;
                goto control_init_fail;
        }
#endif

        /*
         * Determine what rx processing steps are done within the host.
         * Possibilities:
         * 1.  Nothing - rx->tx forwarding and rx PN entirely within target.
         *     (This is unlikely; even if the target is doing rx->tx forwarding,
         *     the host should be doing rx->tx forwarding too, as a back up for
         *     the target's rx->tx forwarding, in case the target runs short on
         *     memory, and can't store rx->tx frames that are waiting for
         *     missing prior rx frames to arrive.)
         * 2.  Just rx -> tx forwarding.
         *     This is the typical configuration for HL, and a likely
         *     configuration for LL STA or small APs (e.g. retail APs).
         * 3.  Both PN check and rx -> tx forwarding.
         *     This is the typical configuration for large LL APs.
         *     Host-side PN check without rx->tx forwarding is not a valid
         *     configuration, since the PN check needs to be done prior to
         *     the rx->tx forwarding.
         */
        if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
                /*
                 * PN check, rx-tx forwarding and rx reorder is done by
                 * the target
                 */
                if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
                        pdev->rx_opt_proc = ol_rx_in_order_deliver;
                else
                        pdev->rx_opt_proc = ol_rx_fwd_check;
        } else {
                if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
                        if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
                                /*
                                 * PN check done on host,
                                 * rx->tx forwarding not done at all.
                                 */
                                pdev->rx_opt_proc = ol_rx_pn_check_only;
                        } else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
                                /*
                                 * Both PN check and rx->tx forwarding done
                                 * on host.
                                 */
                                pdev->rx_opt_proc = ol_rx_pn_check;
                        } else {
#define TRACESTR01 "invalid config: if rx PN check is on the host,"\
"rx->tx forwarding check needs to also be on the host"
                                QDF_TRACE(QDF_MODULE_ID_TXRX,
                                          QDF_TRACE_LEVEL_ERROR,
                                          "%s: %s", __func__, TRACESTR01);
#undef TRACESTR01
                                ret = -EINVAL;
                                goto control_init_fail;
                        }
                } else {
                        /* PN check done on target */
                        if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
                            ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
                                /*
                                 * rx->tx forwarding done on host (possibly as
                                 * back-up for target-side primary rx->tx
                                 * forwarding)
                                 */
                                pdev->rx_opt_proc = ol_rx_fwd_check;
                        } else {
                                /*
                                 * rx->tx forwarding either done in target,
                                 * or not done at all
                                 */
                                pdev->rx_opt_proc = ol_rx_deliver;
                        }
                }
        }

        /* initialize mutexes for tx desc alloc and peer lookup */
        qdf_spinlock_create(&pdev->peer_ref_mutex);
        qdf_spinlock_create(&pdev->rx.mutex);
        qdf_spinlock_create(&pdev->last_real_peer_mutex);
        qdf_spinlock_create(&pdev->peer_map_unmap_lock);
        OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);

        if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK) {
                ret = -ENOMEM;
                goto reorder_trace_attach_fail;
        }

        if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK) {
                ret = -ENOMEM;
                goto pn_trace_attach_fail;
        }

#ifdef PERE_IP_HDR_ALIGNMENT_WAR
        pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
#endif

        /*
         * WDI event attach
         */
        wdi_event_attach(pdev);

        /*
         * Initialize rx PN check characteristics for different security types.
         */
        qdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0);

        /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
        pdev->rx_pn[htt_sec_type_tkip].len =
                pdev->rx_pn[htt_sec_type_tkip_nomic].len =
                        pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
        pdev->rx_pn[htt_sec_type_tkip].cmp =
                pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
                        pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;

        /* WAPI: 128-bit PN */
        pdev->rx_pn[htt_sec_type_wapi].len = 128;
        pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;

        OL_RX_REORDER_TIMEOUT_INIT(pdev);

        ol_txrx_dbg("Created pdev %pK\n", pdev);

        pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);

#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3

/* #if 1 -- TODO: clean this up */
#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
        /* avg = 100% * new + 0% * old */ \
        (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
/*
 * #else
 * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
 *         //avg = 25% * new + 25% * old
 *         (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
 * #endif
 */
        pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
        pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
#endif

        ol_txrx_local_peer_id_pool_init(pdev);

        pdev->cfg.ll_pause_txq_limit =
                ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);

        /* TX flow control for peer who is in very bad link status */
        ol_tx_badpeer_flow_cl_init(pdev);

#ifdef QCA_COMPUTE_TX_DELAY
        qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
        qdf_spinlock_create(&pdev->tx_delay.mutex);

        /* initialize compute interval with 5 seconds (ESE default) */
        pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
        {
                uint32_t bin_width_1000ticks;

                bin_width_1000ticks =
                        qdf_system_msecs_to_ticks
                                (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
                                 * 1000);
                /*
                 * Compute a factor and shift that together are equal to the
                 * inverse of the bin_width time, so that rather than dividing
                 * by the bin width time, approximately the same result can be
                 * obtained much more efficiently by a multiply + shift.
                 * multiply_factor >> shift = 1 / bin_width_time, so
                 * multiply_factor = (1 << shift) / bin_width_time.
                 *
                 * Pick the shift semi-arbitrarily.
                 * If we knew statically what the bin_width would be, we could
                 * choose a shift that minimizes the error.
                 * Since the bin_width is determined dynamically, simply use a
                 * shift that is about half of the uint32_t size. This should
                 * result in a relatively large multiplier value, which
                 * minimizes error from rounding the multiplier to an integer.
                 * The rounding error only becomes significant if the tick units
                 * are on the order of 1 microsecond. In most systems, it is
                 * expected that the tick units will be relatively low-res,
                 * on the order of 1 millisecond. In such systems the rounding
                 * error is negligible.
                 * It would be more accurate to dynamically try out different
                 * shifts and choose the one that results in the smallest
                 * rounding error, but that extra level of fidelity is
                 * not needed.
                 */
                pdev->tx_delay.hist_internal_bin_width_shift = 16;
                pdev->tx_delay.hist_internal_bin_width_mult =
                        ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
                         1000 + (bin_width_1000ticks >> 1)) /
                        bin_width_1000ticks;
        }
1275#endif /* QCA_COMPUTE_TX_DELAY */
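
	/*
	 * Worked example for the multiply + shift above (illustrative numbers,
	 * not taken from any particular target): with 1 ms ticks and a 10 ms
	 * bin width, bin_width_1000ticks = 10000, so
	 *
	 *   mult = ((1 << 16) * 1000 + 5000) / 10000 = 6554
	 *
	 * and a delay of 25 ticks maps to bin (25 * 6554) >> 16 = 2, i.e.
	 * approximately 25 / 10 without a division in the fast path.
	 */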
1276
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001277 /* Thermal Mitigation */
1278 ol_tx_throttle_init(pdev);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001279
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001280 ol_tso_seg_list_init(pdev, desc_pool_size);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001281
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301282 ol_tso_num_seg_list_init(pdev, desc_pool_size);
1283
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001284 ol_tx_register_flow_control(pdev);
1285
1286 return 0; /* success */
1287
Leo Chang376398b2015-10-23 14:19:02 -07001288pn_trace_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001289 OL_RX_REORDER_TRACE_DETACH(pdev);
1290
Leo Chang376398b2015-10-23 14:19:02 -07001291reorder_trace_attach_fail:
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301292 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1293 qdf_spinlock_destroy(&pdev->rx.mutex);
1294 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301295 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001296 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1297
Leo Chang376398b2015-10-23 14:19:02 -07001298control_init_fail:
1299desc_alloc_fail:
1300 for (i = 0; i < fail_idx; i++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001301 htt_tx_desc_free(pdev->htt_pdev,
Leo Chang376398b2015-10-23 14:19:02 -07001302 (ol_tx_desc_find(pdev, i))->htt_tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001303
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301304 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001305 &pdev->tx_desc.desc_pages, 0, true);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001306
Leo Chang376398b2015-10-23 14:19:02 -07001307page_alloc_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001308 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1309 htt_ipa_uc_detach(pdev->htt_pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001310uc_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001311 htt_detach(pdev->htt_pdev);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301312htt_attach_fail:
1313 ol_tx_desc_dup_detect_deinit(pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001314ol_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001315 return ret; /* fail */
1316}
1317
Dhanashri Atre12a08392016-02-17 13:10:34 -08001318/**
1319 * ol_txrx_pdev_attach_target() - send target configuration
1320 *
1321 * @pdev - the physical device being initialized
1322 *
1323 * The majority of the data SW setup is done by the pdev_attach
1324 * functions, but this function completes the data SW setup by
1325 * sending datapath configuration messages to the target.
1326 *
1327 * Return: 0 - success, 1 - failure
1328 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001329static A_STATUS ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001330{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001331 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001332
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +05301333	return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0 : 1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001334}
1335
Dhanashri Atre12a08392016-02-17 13:10:34 -08001336/**
Mohit Khanna54f3a382017-03-13 17:56:32 -07001337 * ol_tx_free_descs_inuse - free tx descriptors which are in use
1338 * @pdev - the physical device for which tx descs need to be freed
1339 *
1340 * Cycle through the pdev's list of in-use TX descriptors (those for which
1341 * a TX completion has not been received) and free them. Should be
1342 * called only when the interrupts are off and all lower layer RX is stopped.
1343 * Otherwise there may be a race condition with TX completions.
1344 *
1345 * Return: None
1346 */
1347static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
1348{
1349 int i;
1350 void *htt_tx_desc;
1351 struct ol_tx_desc_t *tx_desc;
1352 int num_freed_tx_desc = 0;
1353
1354 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1355 tx_desc = ol_tx_desc_find(pdev, i);
1356 /*
1357 * Confirm that each tx descriptor is "empty", i.e. it has
1358 * no tx frame attached.
1359 * In particular, check that there are no frames that have
1360 * been given to the target to transmit, for which the
1361 * target has never provided a response.
1362 */
1363 if (qdf_atomic_read(&tx_desc->ref_cnt)) {
1364 ol_txrx_dbg("Warning: freeing tx frame (no compltn)");
1365 ol_tx_desc_frame_free_nonstd(pdev,
1366 tx_desc, 1);
1367 num_freed_tx_desc++;
1368 }
1369 htt_tx_desc = tx_desc->htt_tx_desc;
1370 htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
1371 }
1372
1373 if (num_freed_tx_desc)
1374 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1375 "freed %d tx frames for which no resp from target",
1376 num_freed_tx_desc);
1377
1378}
1379
1380/**
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301381 * ol_txrx_pdev_pre_detach() - detach the data SW state
Dhanashri Atre12a08392016-02-17 13:10:34 -08001382 * @pdev - the data physical device object being removed
1383 * @force - delete the pdev (and its vdevs and peers) even if
1384 * there are outstanding references by the target to the vdevs
1385 * and peers within the pdev
1386 *
1387 * This function is used when the WLAN driver is being removed to
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301388 * detach the host data component within the driver.
Dhanashri Atre12a08392016-02-17 13:10:34 -08001389 *
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301390 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08001391 */
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301392static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001393{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001394 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001395
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001396 /* preconditions */
1397 TXRX_ASSERT2(pdev);
1398
1399 /* check that the pdev has no vdevs allocated */
1400 TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
1401
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001402#ifdef QCA_SUPPORT_TX_THROTTLE
1403 /* Thermal Mitigation */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301404 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
1405 qdf_timer_free(&pdev->tx_throttle.phase_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001406#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301407 qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1408 qdf_timer_free(&pdev->tx_throttle.tx_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001409#endif
1410#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001411
1412 if (force) {
1413 /*
1414 * The assertion above confirms that all vdevs within this pdev
1415 * were detached. However, they may not have actually been
1416 * deleted.
1417 * If the vdev had peers which never received a PEER_UNMAP msg
1418 * from the target, then there are still zombie peer objects,
1419 * and the vdev parents of the zombie peers are also zombies,
1420 * hanging around until their final peer gets deleted.
1421 * Go through the peer hash table and delete any peers left.
1422 * As a side effect, this will complete the deletion of any
1423 * vdevs that are waiting for their peers to finish deletion.
1424 */
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001425 ol_txrx_dbg("Force delete for pdev %pK\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001426 pdev);
1427 ol_txrx_peer_find_hash_erase(pdev);
1428 }
1429
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301430 /* to get flow pool status before freeing descs */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07001431 ol_tx_dump_flow_pool_info((void *)pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07001432 ol_tx_free_descs_inuse(pdev);
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301433 ol_tx_deregister_flow_control(pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07001434
1435 /*
1436	 * ol_tso_seg_list_deinit should happen after ol_tx_free_descs_inuse,
1437	 * since ol_tx_free_descs_inuse accesses the tso seg freelist that is
1438	 * de-initialized in ol_tso_seg_list_deinit
1439 */
1440 ol_tso_seg_list_deinit(pdev);
1441 ol_tso_num_seg_list_deinit(pdev);
1442
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301443 /* Stop the communication between HTT and target at first */
1444 htt_detach_target(pdev->htt_pdev);
1445
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301446 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001447 &pdev->tx_desc.desc_pages, 0, true);
1448 pdev->tx_desc.freelist = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001449
1450 /* Detach micro controller data path offload resource */
1451 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1452 htt_ipa_uc_detach(pdev->htt_pdev);
1453
1454 htt_detach(pdev->htt_pdev);
Nirav Shah76291962016-04-25 10:50:37 +05301455 ol_tx_desc_dup_detect_deinit(pdev);
1456
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301457 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1458 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
1459 qdf_spinlock_destroy(&pdev->rx.mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001460 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001461#ifdef QCA_SUPPORT_TX_THROTTLE
1462 /* Thermal Mitigation */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301463 qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001464#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301465
1466 /* TX flow control for peer who is in very bad link status */
1467 ol_tx_badpeer_flow_cl_deinit(pdev);
1468
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001469 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1470
1471 OL_RX_REORDER_TRACE_DETACH(pdev);
1472 OL_RX_PN_TRACE_DETACH(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301473
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001474 /*
1475 * WDI event detach
1476 */
1477 wdi_event_detach(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301478
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001479 ol_txrx_local_peer_id_cleanup(pdev);
1480
1481#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301482 qdf_spinlock_destroy(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001483#endif
1484}
1485
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301486/**
1487 * ol_txrx_pdev_detach() - delete the data SW state
1488 * @ppdev - the data physical device object being removed
1489 * @force - delete the pdev (and its vdevs and peers) even if
1490 * there are outstanding references by the target to the vdevs
1491 * and peers within the pdev
1492 *
1493 * This function is used when the WLAN driver is being removed to
1494 * remove the host data component within the driver.
1495 * All virtual devices within the physical device need to be deleted
1496 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
1497 *
1498 * Return: None
1499 */
1500static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
1501{
1502 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Rakesh Pillai33942c42018-05-09 11:45:38 +05301503 struct ol_txrx_stats_req_internal *req, *temp_req;
tfyu9fcabd72017-09-26 17:46:48 +08001504 int i = 0;
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301505
1506	/* check to ensure the txrx pdev structure is not NULL */
1507 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05301508 ol_txrx_err(
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301509 "NULL pdev passed to %s\n", __func__);
1510 return;
1511 }
1512
1513 htt_pktlogmod_exit(pdev);
1514
tfyu9fcabd72017-09-26 17:46:48 +08001515 qdf_spin_lock_bh(&pdev->req_list_spinlock);
1516 if (pdev->req_list_depth > 0)
1517 ol_txrx_err(
1518 "Warning: the txrx req list is not empty, depth=%d\n",
1519 pdev->req_list_depth
1520 );
Rakesh Pillai33942c42018-05-09 11:45:38 +05301521 TAILQ_FOREACH_SAFE(req, &pdev->req_list, req_list_elem, temp_req) {
tfyu9fcabd72017-09-26 17:46:48 +08001522 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
1523 pdev->req_list_depth--;
1524 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05301525 "%d: %pK,verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)\n",
tfyu9fcabd72017-09-26 17:46:48 +08001526 i++,
1527 req,
1528 req->base.print.verbose,
1529 req->base.print.concise,
1530 req->base.stats_type_upload_mask,
1531 req->base.stats_type_reset_mask
1532 );
1533 qdf_mem_free(req);
1534 }
1535 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
1536
1537 qdf_spinlock_destroy(&pdev->req_list_spinlock);
Ajit Pal Singh8184e932018-07-25 13:54:13 +05301538 qdf_spinlock_destroy(&pdev->tx_mutex);
tfyu9fcabd72017-09-26 17:46:48 +08001539
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301540 OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
1541
1542 if (pdev->cfg.is_high_latency)
1543 ol_tx_sched_detach(pdev);
1544
1545 htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
1546
1547 htt_pdev_free(pdev->htt_pdev);
1548 ol_txrx_peer_find_detach(pdev);
1549 ol_txrx_tso_stats_deinit(pdev);
jitiphil335d2412018-06-07 22:49:24 +05301550 ol_txrx_fw_stats_desc_pool_deinit(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301551
1552 ol_txrx_pdev_txq_log_destroy(pdev);
1553 ol_txrx_pdev_grp_stat_destroy(pdev);
Alok Kumarddd457e2018-04-09 13:51:42 +05301554
Rakshith Suresh Patkar44f6a8f2018-04-17 16:17:12 +05301555 ol_txrx_debugfs_exit(pdev);
1556
Alok Kumarddd457e2018-04-09 13:51:42 +05301557 qdf_mem_free(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301558}
1559
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301560#if defined(QCA_HL_NETDEV_FLOW_CONTROL)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301561
1562/**
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301563 * ol_txrx_vdev_per_vdev_tx_desc_init() - initialise per vdev tx desc count
1564 * related variables.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301565 * @vdev: the virtual device object
1566 *
1567 * Return: None
1568 */
1569static inline void
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301570ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301571{
1572 qdf_atomic_init(&vdev->tx_desc_count);
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301573 vdev->tx_desc_limit = 0;
1574 vdev->queue_restart_th = 0;
1575 vdev->prio_q_paused = 0;
1576 vdev->queue_stop_th = 0;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301577}
1578#else
1579
1580static inline void
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301581ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301582{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301583}
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301584#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301585
Dhanashri Atre12a08392016-02-17 13:10:34 -08001586/**
1587 * ol_txrx_vdev_attach - Allocate and initialize the data object
1588 * for a new virtual device.
1589 *
1590 * @data_pdev - the physical device the virtual device belongs to
1591 * @vdev_mac_addr - the MAC address of the virtual device
1592 * @vdev_id - the ID used to identify the virtual device to the target
1593 * @op_mode - whether this virtual device is operating as an AP,
1594 * an IBSS, or a STA
1595 *
1596 * Return: success: handle to new data vdev object, failure: NULL
1597 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001598static struct cdp_vdev *
1599ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001600 uint8_t *vdev_mac_addr,
1601 uint8_t vdev_id, enum wlan_op_mode op_mode)
1602{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001603 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001604 struct ol_txrx_vdev_t *vdev;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001605 QDF_STATUS qdf_status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001606
1607 /* preconditions */
1608 TXRX_ASSERT2(pdev);
1609 TXRX_ASSERT2(vdev_mac_addr);
1610
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301611 vdev = qdf_mem_malloc(sizeof(*vdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001612 if (!vdev)
1613 return NULL; /* failure */
1614
1615 /* store provided params */
1616 vdev->pdev = pdev;
1617 vdev->vdev_id = vdev_id;
1618 vdev->opmode = op_mode;
1619
1620 vdev->delete.pending = 0;
1621 vdev->safemode = 0;
1622 vdev->drop_unenc = 1;
1623 vdev->num_filters = 0;
Himanshu Agarwal5ac2f7b2016-05-06 20:08:10 +05301624 vdev->fwd_tx_packets = 0;
1625 vdev->fwd_rx_packets = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001626
Ajit Pal Singh5bcf68a2018-04-23 12:20:18 +05301627 ol_txrx_vdev_per_vdev_tx_desc_init(vdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301628
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301629 qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001630 OL_TXRX_MAC_ADDR_LEN);
1631
1632 TAILQ_INIT(&vdev->peer_list);
1633 vdev->last_real_peer = NULL;
1634
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001635 ol_txrx_hl_tdls_flag_reset((struct cdp_vdev *)vdev, false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301636
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001637#ifdef QCA_IBSS_SUPPORT
1638 vdev->ibss_peer_num = 0;
1639 vdev->ibss_peer_heart_beat_timer = 0;
1640#endif
1641
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301642 ol_txrx_vdev_txqs_init(vdev);
1643
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301644 qdf_spinlock_create(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001645 vdev->ll_pause.paused_reason = 0;
1646 vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
1647 vdev->ll_pause.txq.depth = 0;
wadesong5e2e8012017-08-21 16:56:03 +08001648 qdf_atomic_init(&vdev->delete.detaching);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301649 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001650 &vdev->ll_pause.timer,
1651 ol_tx_vdev_ll_pause_queue_send, vdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301652 QDF_TIMER_TYPE_SW);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301653 qdf_atomic_init(&vdev->os_q_paused);
1654 qdf_atomic_set(&vdev->os_q_paused, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001655 vdev->tx_fl_lwm = 0;
1656 vdev->tx_fl_hwm = 0;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001657 vdev->rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001658 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Abhishek Singh217d9782017-04-28 23:49:11 +05301659 qdf_mem_zero(&vdev->last_peer_mac_addr,
1660 sizeof(union ol_txrx_align_mac_addr_t));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301661 qdf_spinlock_create(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001662 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08001663 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001664 vdev->osif_fc_ctx = NULL;
1665
Alok Kumar75355aa2018-03-19 17:32:58 +05301666 vdev->txrx_stats.txack_success = 0;
1667 vdev->txrx_stats.txack_failed = 0;
1668
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001669 /* Default MAX Q depth for every VDEV */
1670 vdev->ll_pause.max_q_depth =
1671 ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001672 qdf_status = qdf_event_create(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001673 /* add this vdev into the pdev's list */
1674 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
1675
Poddar, Siddarth14521792017-03-14 21:19:42 +05301676 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001677 "Created vdev %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001678 vdev,
1679 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
1680 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
1681 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
1682
1683 /*
1684 * We've verified that htt_op_mode == wlan_op_mode,
1685 * so no translation is needed.
1686 */
1687 htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
1688
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001689 return (struct cdp_vdev *)vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001690}
1691
Dhanashri Atre12a08392016-02-17 13:10:34 -08001692/**
1693 * ol_txrx_vdev_register - Link a vdev's data object with the
1694 * matching OS shim vdev object.
1695 *
1696 * @txrx_vdev: the virtual device's data object
1697 * @osif_vdev: the virtual device's OS shim object
Sravan Kumar Kairam43f191b2018-05-04 17:00:39 +05301698 * @ctrl_vdev: UMAC vdev objmgr handle
Dhanashri Atre12a08392016-02-17 13:10:34 -08001699 * @txrx_ops: (pointers to) functions used for tx and rx data xfer
1700 *
1701 * The data object for a virtual device is created by the
1702 * function ol_txrx_vdev_attach. However, rather than fully
1703 * linking the data vdev object with the vdev objects from the
1704 * other subsystems that the data vdev object interacts with,
1705 * the txrx_vdev_attach function focuses primarily on creating
1706 * the data vdev object. After the creation of both the data
1707 * vdev object and the OS shim vdev object, this
1708 * txrx_osif_vdev_attach function is used to connect the two
1709 * vdev objects, so the data SW can use the OS shim vdev handle
1710 * when passing rx data received by a vdev up to the OS shim.
1711 */
Sravan Kumar Kairam43f191b2018-05-04 17:00:39 +05301712static void ol_txrx_vdev_register(struct cdp_vdev *pvdev, void *osif_vdev,
1713 struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
1714 struct ol_txrx_ops *txrx_ops)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001715{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001716 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07001717
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001718 if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
1719 qdf_print("%s: vdev/txrx_ops is NULL!\n", __func__);
1720 qdf_assert(0);
1721 return;
1722 }
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001723
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001724 vdev->osif_dev = osif_vdev;
Sravan Kumar Kairam43f191b2018-05-04 17:00:39 +05301725 vdev->ctrl_vdev = ctrl_vdev;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001726 vdev->rx = txrx_ops->rx.rx;
Poddar, Siddarth3906e172018-01-09 11:24:58 +05301727 vdev->stats_rx = txrx_ops->rx.stats_rx;
Alok Kumar4696fb02018-06-06 00:10:18 +05301728 vdev->tx_comp = txrx_ops->tx.tx_comp;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001729 txrx_ops->tx.tx = ol_tx_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001730}
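
/*
 * Hedged usage sketch of the attach + register split described above.  The
 * shim-side names below are hypothetical, and in the full driver these entry
 * points are normally reached through the cdp ops table rather than called
 * directly:
 *
 *   struct ol_txrx_ops ops = {0};
 *
 *   ops.rx.rx = my_shim_rx_deliver;    // hypothetical OS shim rx handler
 *   vdev = ol_txrx_vdev_attach(pdev, mac_addr, vdev_id, wlan_op_mode_sta);
 *   ol_txrx_vdev_register(vdev, my_shim_vdev, ctrl_vdev, &ops);
 *   // after this call, ops.tx.tx points at ol_tx_data for the shim's tx path
 */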
1731
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001732void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
1733{
1734 vdev->safemode = val;
1735}
1736
Dhanashri Atre12a08392016-02-17 13:10:34 -08001737/**
1738 * ol_txrx_set_privacy_filters - set the privacy filter
1739 * @vdev - the data virtual device object
1740 * @filter - filters to be set
1741 * @num - the number of filters
1742 *
1743 * Rx related. Set the privacy filters. When receiving rx packets,
1744 * check the ether type, filter type and packet type to decide
1745 * whether to discard these packets.
1746 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08001747static void
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001748ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
1749 void *filters, uint32_t num)
1750{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301751 qdf_mem_copy(vdev->privacy_filters, filters,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001752 num * sizeof(struct privacy_exemption));
1753 vdev->num_filters = num;
1754}
1755
1756void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
1757{
1758 vdev->drop_unenc = val;
1759}
1760
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001761#if defined(CONFIG_HL_SUPPORT) || defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
gbian016a42e2017-03-01 18:49:11 +08001762
1763static void
1764ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
1765{
1766 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1767 int i;
1768 struct ol_tx_desc_t *tx_desc;
1769
1770 qdf_spin_lock_bh(&pdev->tx_mutex);
1771 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1772 tx_desc = ol_tx_desc_find(pdev, i);
1773 if (tx_desc->vdev == vdev)
1774 tx_desc->vdev = NULL;
1775 }
1776 qdf_spin_unlock_bh(&pdev->tx_mutex);
1777}
1778
1779#else
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001780#ifdef QCA_LL_TX_FLOW_CONTROL_V2
1781static void ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
1782{
1783 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1784 struct ol_tx_flow_pool_t *pool;
1785 int i;
1786 struct ol_tx_desc_t *tx_desc;
gbian016a42e2017-03-01 18:49:11 +08001787
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001788 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
1789 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1790 tx_desc = ol_tx_desc_find(pdev, i);
1791 if (!qdf_atomic_read(&tx_desc->ref_cnt))
1792 /* not in use */
1793 continue;
1794
1795 pool = tx_desc->pool;
1796 qdf_spin_lock_bh(&pool->flow_pool_lock);
1797 if (tx_desc->vdev == vdev)
1798 tx_desc->vdev = NULL;
1799 qdf_spin_unlock_bh(&pool->flow_pool_lock);
1800 }
1801 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
1802}
1803
1804#else
gbian016a42e2017-03-01 18:49:11 +08001805static void
1806ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
1807{
gbian016a42e2017-03-01 18:49:11 +08001808}
Manjunathappa Prakash7c985c72018-04-09 18:22:11 -07001809#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
1810#endif /* CONFIG_HL_SUPPORT */
gbian016a42e2017-03-01 18:49:11 +08001811
Dhanashri Atre12a08392016-02-17 13:10:34 -08001812/**
1813 * ol_txrx_vdev_detach - Deallocate the specified data virtual
1814 * device object.
1815 * @data_vdev: data object for the virtual device in question
1816 * @callback: function to call (if non-NULL) once the vdev has
1817 * been wholly deleted
1818 * @callback_context: context to provide in the callback
1819 *
1820 * All peers associated with the virtual device need to be deleted
1821 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
1822 * However, for the peers to be fully deleted, the peer deletion has to
1823 * percolate through the target data FW and back up to the host data SW.
1824 * Thus, even though the host control SW may have issued a peer_detach
1825 * call for each of the vdev's peers, the peer objects may still be
1826 * allocated, pending removal of all references to them by the target FW.
1827 * In this case, though the vdev_detach function call will still return
1828 * immediately, the vdev itself won't actually be deleted, until the
1829 * deletions of all its peers complete.
1830 * The caller can provide a callback function pointer to be notified when
1831 * the vdev deletion actually happens - whether it's directly within the
1832 * vdev_detach call, or if it's deferred until all in-progress peer
1833 * deletions have completed.
1834 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08001835static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001836ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001837 ol_txrx_vdev_delete_cb callback, void *context)
1838{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001839 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
wadesong5e2e8012017-08-21 16:56:03 +08001840 struct ol_txrx_pdev_t *pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001841
1842 /* preconditions */
1843 TXRX_ASSERT2(vdev);
wadesong5e2e8012017-08-21 16:56:03 +08001844 pdev = vdev->pdev;
1845
1846 /* prevent anyone from restarting the ll_pause timer again */
1847 qdf_atomic_set(&vdev->delete.detaching, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001848
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301849 ol_txrx_vdev_tx_queue_free(vdev);
1850
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301851 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301852 qdf_timer_stop(&vdev->ll_pause.timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001853 vdev->ll_pause.is_q_timer_on = false;
1854 while (vdev->ll_pause.txq.head) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05301855 qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
Yun Parkeaea8632017-04-09 09:53:45 -07001856
Nirav Shahcbc6d722016-03-01 16:24:53 +05301857 qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
Nirav Shahcbc6d722016-03-01 16:24:53 +05301858 qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001859 vdev->ll_pause.txq.head = next;
1860 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301861 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
wadesong5e2e8012017-08-21 16:56:03 +08001862
1863	/* The ll_pause timer should be deleted without any locks held, and
1864	 * no timer function should execute after this point, because
1865	 * qdf_timer_free deletes the timer synchronously.
1866 */
1867 qdf_timer_free(&vdev->ll_pause.timer);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301868 qdf_spinlock_destroy(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001869
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301870 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001871 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08001872 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001873 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301874 qdf_spin_unlock_bh(&vdev->flow_control_lock);
1875 qdf_spinlock_destroy(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001876
1877 /* remove the vdev from its parent pdev's list */
1878 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
1879
1880 /*
1881 * Use peer_ref_mutex while accessing peer_list, in case
1882 * a peer is in the process of being removed from the list.
1883 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301884 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001885 /* check that the vdev has no peers allocated */
1886 if (!TAILQ_EMPTY(&vdev->peer_list)) {
1887 /* debug print - will be removed later */
Poddar, Siddarth14521792017-03-14 21:19:42 +05301888 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001889 "%s: not deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001890 __func__, vdev,
1891 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
1892 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
1893 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
1894 /* indicate that the vdev needs to be deleted */
1895 vdev->delete.pending = 1;
1896 vdev->delete.callback = callback;
1897 vdev->delete.context = context;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301898 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001899 return;
1900 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301901 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001902 qdf_event_destroy(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001903
Poddar, Siddarth14521792017-03-14 21:19:42 +05301904 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001905 "%s: deleting vdev obj %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001906 __func__, vdev,
1907 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
1908 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
1909 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
1910
1911 htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
1912
1913 /*
Yun Parkeaea8632017-04-09 09:53:45 -07001914 * ol_tx_desc_free might access invalid content of the vdev referred to
 1915 * by a tx desc, since this vdev might be detached asynchronously in
 1916 * another thread.
1917 *
1918 * Go through tx desc pool to set corresponding tx desc's vdev to NULL
1919 * when detach this vdev, and add vdev checking in the ol_tx_desc_free
1920 * to avoid crash.
1921 *
1922 */
gbian016a42e2017-03-01 18:49:11 +08001923 ol_txrx_tx_desc_reset_vdev(vdev);
1924
1925 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001926 * Doesn't matter if there are outstanding tx frames -
1927 * they will be freed once the target sends a tx completion
1928 * message for them.
1929 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301930 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001931 if (callback)
1932 callback(context);
1933}
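
/*
 * Hedged usage sketch of the deferred-deletion callback described above
 * (the callback, event, and variable names are hypothetical):
 *
 *   static void my_vdev_delete_done(void *ctx)
 *   {
 *           qdf_event_set((qdf_event_t *)ctx);
 *   }
 *
 *   ol_txrx_vdev_detach(vdev, my_vdev_delete_done, &my_delete_event);
 *   // runs the callback immediately if no peer deletions are pending,
 *   // otherwise once the last peer unmap completes
 */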
1934
1935/**
1936 * ol_txrx_flush_rx_frames() - flush cached rx frames
1937 * @peer: peer
1938 * @drop: set flag to drop frames
1939 *
1940 * Return: None
1941 */
1942void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301943 bool drop)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001944{
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001945 struct ol_txrx_cached_bufq_t *bufqi;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001946 struct ol_rx_cached_buf *cache_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301947 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001948 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001949
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301950 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
1951 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001952 return;
1953 }
1954
Dhanashri Atre182b0272016-02-17 15:35:07 -08001955 qdf_assert(peer->vdev);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301956 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001957 bufqi = &peer->bufq_info;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001958
Dhanashri Atre50141c52016-04-07 13:15:29 -07001959 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
Dhanashri Atre182b0272016-02-17 15:35:07 -08001960 data_rx = peer->vdev->rx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001961 else
1962 drop = true;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301963 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001964
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001965 qdf_spin_lock_bh(&bufqi->bufq_lock);
1966 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001967 typeof(*cache_buf), list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001968 while (!list_empty(&bufqi->cached_bufq)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001969 list_del(&cache_buf->list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001970 bufqi->curr--;
1971 qdf_assert(bufqi->curr >= 0);
1972 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001973 if (drop) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05301974 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001975 } else {
1976 /* Flush the cached frames to HDD */
Dhanashri Atre182b0272016-02-17 15:35:07 -08001977 ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301978 if (ret != QDF_STATUS_SUCCESS)
Nirav Shahcbc6d722016-03-01 16:24:53 +05301979 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001980 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301981 qdf_mem_free(cache_buf);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001982 qdf_spin_lock_bh(&bufqi->bufq_lock);
1983 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001984 typeof(*cache_buf), list);
1985 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07001986 bufqi->qdepth_no_thresh = bufqi->curr;
1987 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301988 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001989}
1990
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07001991static void ol_txrx_flush_cache_rx_queue(void)
Poddar, Siddartha78cac32016-12-29 20:08:34 +05301992{
1993 uint8_t sta_id;
1994 struct ol_txrx_peer_t *peer;
1995 struct ol_txrx_pdev_t *pdev;
1996
1997 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
1998 if (!pdev)
1999 return;
2000
2001 for (sta_id = 0; sta_id < WLAN_MAX_STA_COUNT; sta_id++) {
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002002 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
2003 sta_id);
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302004 if (!peer)
2005 continue;
2006 ol_txrx_flush_rx_frames(peer, 1);
2007 }
2008}
2009
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302010/* Define short name to use in cds_trigger_recovery */
2011#define PEER_DEL_TIMEOUT QDF_PEER_DELETION_TIMEDOUT
2012
Dhanashri Atre12a08392016-02-17 13:10:34 -08002013/**
Naveen Rawat17c42a82018-02-01 19:18:27 -08002014 * ol_txrx_dump_peer_access_list() - dump peer access list
2015 * @peer: peer handle
2016 *
2017 * This function dumps any peer debug ids that still hold references to the peer
2018 *
2019 * Return: None
2020 */
2021static void ol_txrx_dump_peer_access_list(ol_txrx_peer_handle peer)
2022{
2023 u32 i;
2024 u32 pending_ref;
2025
2026 for (i = 0; i < PEER_DEBUG_ID_MAX; i++) {
2027 pending_ref = qdf_atomic_read(&peer->access_list[i]);
2028 if (pending_ref)
2029 ol_txrx_info_high("id %d pending refs %d",
2030 i, pending_ref);
2031 }
2032}
2033
2034/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08002035 * ol_txrx_peer_attach - Allocate and set up references for a
2036 * data peer object.
2037 * @pvdev - data virtual device object that will directly
2038 * own the data_peer object
2039 * @peer_mac_addr - MAC address of the new peer
2040 * @ctrl_peer - UMAC peer objmgr handle, stored in the new
2041 * data peer object
2042 *
2043 * When an association with a peer starts, the host's control SW
2044 * uses this function to inform the host data SW.
2045 * The host data SW allocates its own peer object, and stores a
2046 * reference to the control peer object within the data peer object.
2047 * The host data SW also stores a reference to the virtual device
2048 * that the peer is associated with. This virtual device handle is
2049 * used when the data SW delivers rx data frames to the OS shim layer.
2050 * The host data SW returns a handle to the new peer data object,
2051 * so a reference within the control peer object can be set to the
2052 * data peer object.
2053 *
2054 * Return: handle to new data peer object, or NULL if the attach
2055 * fails
2056 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002057static void *
psimha8696f772018-04-03 17:38:38 -07002058ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr,
Sravan Kumar Kairamc273afd2018-05-28 12:12:28 +05302059 struct cdp_ctrl_objmgr_peer *ctrl_peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002060{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002061 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002062 struct ol_txrx_peer_t *peer;
2063 struct ol_txrx_peer_t *temp_peer;
2064 uint8_t i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002065 bool wait_on_deletion = false;
2066 unsigned long rc;
Dhanashri Atre12a08392016-02-17 13:10:34 -08002067 struct ol_txrx_pdev_t *pdev;
Abhishek Singh217d9782017-04-28 23:49:11 +05302068 bool cmp_wait_mac = false;
2069 uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002070
2071 /* preconditions */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002072 TXRX_ASSERT2(vdev);
2073 TXRX_ASSERT2(peer_mac_addr);
2074
Dhanashri Atre12a08392016-02-17 13:10:34 -08002075 pdev = vdev->pdev;
2076 TXRX_ASSERT2(pdev);
2077
Abhishek Singh217d9782017-04-28 23:49:11 +05302078 if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
2079 QDF_MAC_ADDR_SIZE))
2080 cmp_wait_mac = true;
2081
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302082 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002083 /* check for duplicate existing peer */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002084 TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2085 if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
2086 (union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302087 ol_txrx_info_high(
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002088 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exists.\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002089 vdev->vdev_id,
2090 peer_mac_addr[0], peer_mac_addr[1],
2091 peer_mac_addr[2], peer_mac_addr[3],
2092 peer_mac_addr[4], peer_mac_addr[5]);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302093 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002094 vdev->wait_on_peer_id = temp_peer->local_id;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002095 qdf_event_reset(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002096 wait_on_deletion = true;
Abhishek Singh217d9782017-04-28 23:49:11 +05302097 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002098 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302099 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002100 return NULL;
2101 }
2102 }
Abhishek Singh217d9782017-04-28 23:49:11 +05302103 if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
2104 &temp_peer->mac_addr,
2105 &vdev->last_peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302106 ol_txrx_info_high(
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002107 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) old peer exists.\n",
Abhishek Singh217d9782017-04-28 23:49:11 +05302108 vdev->vdev_id,
2109 vdev->last_peer_mac_addr.raw[0],
2110 vdev->last_peer_mac_addr.raw[1],
2111 vdev->last_peer_mac_addr.raw[2],
2112 vdev->last_peer_mac_addr.raw[3],
2113 vdev->last_peer_mac_addr.raw[4],
2114 vdev->last_peer_mac_addr.raw[5]);
2115 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2116 vdev->wait_on_peer_id = temp_peer->local_id;
2117 qdf_event_reset(&vdev->wait_delete_comp);
2118 wait_on_deletion = true;
2119 break;
2120 } else {
2121 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2122 ol_txrx_err("peer not found");
2123 return NULL;
2124 }
2125 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002126 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302127 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002128
Abhishek Singh217d9782017-04-28 23:49:11 +05302129 qdf_mem_zero(&vdev->last_peer_mac_addr,
2130 sizeof(union ol_txrx_align_mac_addr_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002131 if (wait_on_deletion) {
2132 /* wait for peer deletion */
Nachiket Kukade0396b732017-11-14 16:35:16 +05302133 rc = qdf_wait_for_event_completion(&vdev->wait_delete_comp,
Prakash Manjunathappad3ccca22016-05-05 19:23:19 -07002134 PEER_DELETION_TIMEOUT);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002135 if (QDF_STATUS_SUCCESS != rc) {
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002136 ol_txrx_err("error waiting for peer_id(%d) deletion, status %d\n",
Dustin Brown100201e2017-07-10 11:48:40 -07002137 vdev->wait_on_peer_id, (int) rc);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002138 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08002139 ol_txrx_dump_peer_access_list(temp_peer);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002140 wlan_roam_debug_dump_table();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002141 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Dustin Brown100201e2017-07-10 11:48:40 -07002142
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002143 return NULL;
2144 }
2145 }
2146
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302147 peer = qdf_mem_malloc(sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002148 if (!peer)
2149 return NULL; /* failure */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002150
2151 /* store provided params */
2152 peer->vdev = vdev;
Sravan Kumar Kairamc273afd2018-05-28 12:12:28 +05302153	peer->ctrl_peer = ctrl_peer;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302154 qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002155 OL_TXRX_MAC_ADDR_LEN);
2156
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302157 ol_txrx_peer_txqs_init(pdev, peer);
2158
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002159 INIT_LIST_HEAD(&peer->bufq_info.cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302160 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002161 /* add this peer into the vdev's list */
2162 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302163 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002164 /* check whether this is a real peer (peer mac addr != vdev mac addr) */
Frank Liu4362e462018-01-16 11:51:55 +08002165 if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr)) {
2166 qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002167 vdev->last_real_peer = peer;
Frank Liu4362e462018-01-16 11:51:55 +08002168 qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
2169 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002170
2171 peer->rx_opt_proc = pdev->rx_opt_proc;
2172
2173 ol_rx_peer_init(pdev, peer);
2174
2175 /* initialize the peer_id */
2176 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2177 peer->peer_ids[i] = HTT_INVALID_PEER;
2178
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302179 qdf_spinlock_create(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002180 qdf_spinlock_create(&peer->bufq_info.bufq_lock);
2181
2182 peer->bufq_info.thresh = OL_TXRX_CACHED_BUFQ_THRESH;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002183
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302184 qdf_atomic_init(&peer->delete_in_progress);
2185 qdf_atomic_init(&peer->flush_in_progress);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302186 qdf_atomic_init(&peer->ref_cnt);
Mohit Khannab7bec722017-11-10 11:43:44 -08002187
2188 for (i = 0; i < PEER_DEBUG_ID_MAX; i++)
2189 qdf_atomic_init(&peer->access_list[i]);
2190
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002191 /* keep one reference for attach */
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002192 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002193
Mohit Khanna8ee37c62017-08-07 17:15:20 -07002194 /* Set a flag to indicate peer create is pending in firmware */
Prakash Dhavali0d3f1d62016-11-20 23:48:24 -08002195 qdf_atomic_init(&peer->fw_create_pending);
2196 qdf_atomic_set(&peer->fw_create_pending, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002197
2198 peer->valid = 1;
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07002199 qdf_timer_init(pdev->osdev, &peer->peer_unmap_timer,
2200 peer_unmap_timer_handler, peer, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002201
2202 ol_txrx_peer_find_hash_add(pdev, peer);
2203
Mohit Khanna47384bc2016-08-15 15:37:05 -07002204 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002205 "vdev %pK created peer %pK ref_cnt %d (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Mohit Khanna47384bc2016-08-15 15:37:05 -07002206 vdev, peer, qdf_atomic_read(&peer->ref_cnt),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002207 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2208 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2209 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2210 /*
2211 * For every peer MAP message, search and set if bss_peer
2212 */
Ankit Guptaa5076012016-09-14 11:32:19 -07002213 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
2214 OL_TXRX_MAC_ADDR_LEN))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002215 peer->bss_peer = 1;
2216
2217 /*
2218 * The peer starts in the "disc" state while association is in progress.
2219 * Once association completes, the peer will get updated to "auth" state
2220 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2221 * or else to the "conn" state. For non-open mode, the peer will
2222 * progress to "auth" state once the authentication completes.
2223 */
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002224 peer->state = OL_TXRX_PEER_STATE_INVALID;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002225 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002226 OL_TXRX_PEER_STATE_DISC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002227
2228#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2229 peer->rssi_dbm = HTT_RSSI_INVALID;
2230#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002231 if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2232 !pdev->self_peer) {
2233 pdev->self_peer = peer;
2234 /*
2235 * No Tx in monitor mode, otherwise results in target assert.
2236 * Setting disable_intrabss_fwd to true
2237 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002238 ol_vdev_rx_set_intrabss_fwd((struct cdp_vdev *)vdev, true);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002239 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002240
2241 ol_txrx_local_peer_id_alloc(pdev, peer);
2242
Leo Chang98726762016-10-28 11:07:18 -07002243 return (void *)peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002244}
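
/*
 * Hedged sketch of the peer state flow described in the comment above
 * (direct calls shown only for illustration; in the driver these are
 * normally invoked through the cdp ops, and sta_mac is hypothetical):
 *
 *   peer = ol_txrx_peer_attach(vdev, sta_mac, ctrl_peer);
 *   // the peer starts in OL_TXRX_PEER_STATE_DISC while association runs
 *   ol_txrx_peer_state_update(pdev, sta_mac, OL_TXRX_PEER_STATE_CONN);
 *   // then, once authentication / the key handshake completes:
 *   ol_txrx_peer_state_update(pdev, sta_mac, OL_TXRX_PEER_STATE_AUTH);
 */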
2245
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302246#undef PEER_DEL_TIMEOUT
2247
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002248/*
2249 * Discarding tx filter - removes all data frames (disconnected state)
2250 */
2251static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2252{
2253 return A_ERROR;
2254}
2255
2256/*
2257 * Non-authentication tx filter - filters out data frames that are not
2258 * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2259 * data frames (connected state)
2260 */
2261static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2262{
2263 return
2264 (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2265 tx_msdu_info->htt.info.ethertype ==
2266 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2267}
2268
2269/*
2270 * Pass-through tx filter - lets all data frames through (authenticated state)
2271 */
2272static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2273{
2274 return A_OK;
2275}
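
/*
 * Hedged sketch of how the three filters above line up with the peer
 * states; the actual installation happens in the peer state update path,
 * so this snippet is illustrative only:
 *
 *   switch (state) {
 *   case OL_TXRX_PEER_STATE_AUTH:
 *           peer->tx_filter = ol_tx_filter_pass_thru;
 *           break;
 *   case OL_TXRX_PEER_STATE_CONN:
 *           peer->tx_filter = ol_tx_filter_non_auth;
 *           break;
 *   default:
 *           peer->tx_filter = ol_tx_filter_discard;
 *           break;
 *   }
 */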
2276
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002277/**
2278 * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2279 * @peer: handle to peer
2280 *
2281 * Returns the peer's MAC address, for modules that do not know the peer type
2282 *
2283 * Return: the mac_addr from peer
2284 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002285static uint8_t *
Leo Chang98726762016-10-28 11:07:18 -07002286ol_txrx_peer_get_peer_mac_addr(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002287{
Leo Chang98726762016-10-28 11:07:18 -07002288 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002289
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002290 if (!peer)
2291 return NULL;
2292
2293 return peer->mac_addr.raw;
2294}
2295
Abhishek Singhcfb44482017-03-10 12:42:37 +05302296#ifdef WLAN_FEATURE_11W
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002297/**
2298 * ol_txrx_get_pn_info() - Returns pn info from peer
2299 * @peer: handle to peer
2300 * @last_pn_valid: return last_rmf_pn_valid value from peer.
2301 * @last_pn: return last_rmf_pn value from peer.
2302 * @rmf_pn_replays: return rmf_pn_replays value from peer.
2303 *
2304 * Return: NONE
2305 */
2306void
Leo Chang98726762016-10-28 11:07:18 -07002307ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002308 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2309{
Leo Chang98726762016-10-28 11:07:18 -07002310 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002311 *last_pn_valid = &peer->last_rmf_pn_valid;
2312 *last_pn = &peer->last_rmf_pn;
2313 *rmf_pn_replays = &peer->rmf_pn_replays;
2314}
Abhishek Singhcfb44482017-03-10 12:42:37 +05302315#else
2316void
2317ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
2318 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2319{
2320}
2321#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002322
2323/**
2324 * ol_txrx_get_opmode() - Return operation mode of vdev
2325 * @vdev: vdev handle
2326 *
2327 * Return: operation mode.
2328 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002329static int ol_txrx_get_opmode(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002330{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002331 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002332
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002333 return vdev->opmode;
2334}
2335
2336/**
2337 * ol_txrx_get_peer_state() - Return peer state of peer
2338 * @peer: peer handle
2339 *
2340 * Return: peer state
2341 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002342static int ol_txrx_get_peer_state(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002343{
Leo Chang98726762016-10-28 11:07:18 -07002344 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002345
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002346 return peer->state;
2347}
2348
2349/**
2350 * ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
2351 * @peer: peer handle
2352 *
2353 * Return: vdev handle from peer
2354 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002355static struct cdp_vdev *ol_txrx_get_vdev_for_peer(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002356{
Leo Chang98726762016-10-28 11:07:18 -07002357 ol_txrx_peer_handle peer = ppeer;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002358
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002359 return (struct cdp_vdev *)peer->vdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002360}
2361
2362/**
2363 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
2364 * @vdev: vdev handle
2365 *
2366 * Return: vdev mac address
2367 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002368static uint8_t *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002369ol_txrx_get_vdev_mac_addr(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002370{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002371 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002372
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002373 if (!vdev)
2374 return NULL;
2375
2376 return vdev->mac_addr.raw;
2377}
2378
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002379#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002380/**
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002381 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002382 * vdev
2383 * @vdev: vdev handle
2384 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002385 * Return: Handle to struct qdf_mac_addr
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002386 */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002387struct qdf_mac_addr *
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002388ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
2389{
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002390 return (struct qdf_mac_addr *)&(vdev->mac_addr);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002391}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002392#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002393
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002394#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002395/**
2396 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
2397 * @vdev: vdev handle
2398 *
2399 * Return: Handle to pdev
2400 */
2401ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
2402{
2403 return vdev->pdev;
2404}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002405#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002406
2407/**
2408 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
2409 * @vdev: vdev handle
2410 *
2411 * Return: Handle to control pdev
2412 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002413static struct cdp_cfg *
2414ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002415{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002416 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002417
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002418 return vdev->pdev->ctrl_pdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002419}
2420
2421/**
2422 * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
2423 * @vdev: vdev handle
2424 *
2425 * Return: Rx Fwd disabled status
2426 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002427static uint8_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002428ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002429{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002430 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002431 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
2432 vdev->pdev->ctrl_pdev;
2433 return cfg->rx_fwd_disabled;
2434}
2435
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002436#ifdef QCA_IBSS_SUPPORT
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002437/**
2438 * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
2439 * @vdev: vdev handle
2440 * @peer_num_delta: number of peers to adjust the count by
2441 *
2442 * Return: OL_TXRX_INVALID_NUM_PEERS on failure, or the total peer count after adjustment.
2443 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002444static int16_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002445ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002446 int16_t peer_num_delta)
2447{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002448 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002449 int16_t new_peer_num;
2450
2451 new_peer_num = vdev->ibss_peer_num + peer_num_delta;
Naveen Rawatc45d1622016-07-05 12:20:09 -07002452 if (new_peer_num > MAX_PEERS || new_peer_num < 0)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002453 return OL_TXRX_INVALID_NUM_PEERS;
2454
2455 vdev->ibss_peer_num = new_peer_num;
2456
2457 return new_peer_num;
2458}
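/*
 * Illustrative usage sketch (not part of the driver): a hypothetical IBSS
 * control path would bump the count when a peer joins (or pass a negative
 * delta when one leaves) and check for the sentinel returned on
 * overflow/underflow.
 *
 *	int16_t peers;
 *
 *	peers = ol_txrx_update_ibss_add_peer_num_of_vdev(pvdev, 1);
 *	if (peers == OL_TXRX_INVALID_NUM_PEERS)
 *		ol_txrx_err("IBSS peer count out of range");
 */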
2459
2460/**
2461 * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
2462 * beat timer
2463 * @vdev: vdev handle
2464 * @timer_value_sec: new heart beat timer value
2465 *
2466 * Return: Old timer value set in vdev.
2467 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002468static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
2469 uint16_t timer_value_sec)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002470{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002471 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002472 uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
2473
2474 vdev->ibss_peer_heart_beat_timer = timer_value_sec;
2475
2476 return old_timer_value;
2477}
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002478#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002479
2480/**
2481 * ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
2482 * @vdev: vdev handle
2483 * @callback: callback function to remove the peer.
2484 * @callback_context: handle for callback function
2485 * @remove_last_peer: whether to also remove the last (IBSS bss/self) peer
2486 *
2487 * Return: NONE
2488 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002489static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002490ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002491 ol_txrx_vdev_peer_remove_cb callback,
2492 void *callback_context, bool remove_last_peer)
2493{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002494 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002495 ol_txrx_peer_handle peer, temp;
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07002496 int self_removed = 0;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002497 /* remove all remote peers for vdev */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002498 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002499
2500 temp = NULL;
2501 TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
2502 peer_list_elem) {
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05302503 if (qdf_atomic_read(&peer->delete_in_progress))
2504 continue;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002505 if (temp) {
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002506 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
Poddar, Siddarth3f97e3d2017-12-18 15:11:13 +05302507 callback(callback_context, temp->mac_addr.raw,
Jiachao Wu641760e2018-01-21 12:11:31 +08002508 vdev->vdev_id, temp);
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002509 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002510 }
2511 /* self peer is deleted last */
2512 if (peer == TAILQ_FIRST(&vdev->peer_list)) {
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07002513 self_removed = 1;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002514 break;
Yun Parkeaea8632017-04-09 09:53:45 -07002515 }
2516 temp = peer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002517 }
2518
Mohit Khanna137b97d2016-04-21 16:11:33 -07002519 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
2520
Orhan K AKYILDIZecf401c2017-04-28 14:10:27 -07002521 if (self_removed)
2522 ol_txrx_info("%s: self peer removed by caller ",
2523 __func__);
2524
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002525 if (remove_last_peer) {
2526 /* remove IBSS bss peer last */
2527 peer = TAILQ_FIRST(&vdev->peer_list);
2528 callback(callback_context, (uint8_t *) &vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08002529 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002530 }
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002531}
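/*
 * Illustrative usage sketch (not part of the driver): the control path passes
 * a callback that tears down its own per-peer state. The hypothetical
 * del_sta_cb below only shows the argument order used by the calls above;
 * the exact parameter types are defined by ol_txrx_vdev_peer_remove_cb.
 *
 *	static void del_sta_cb(void *ctx, uint8_t *peer_mac,
 *			       uint8_t vdev_id, void *peer)
 *	{
 *		...control-path cleanup for this peer...
 *	}
 *
 *	ol_txrx_remove_peers_for_vdev(pvdev, del_sta_cb, my_ctx, true);
 */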
2532
2533/**
2534 * ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
2535 * @vdev: vdev handle
2536 * @callback: callback function to remove the peer.
2537 * @callback_context: handle for callback function
2538 *
2539 * Return: NONE
2540 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002541static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002542ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002543 ol_txrx_vdev_peer_remove_cb callback,
2544 void *callback_context)
2545{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002546 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002547 ol_txrx_peer_handle peer = NULL;
Jiachao Wu641760e2018-01-21 12:11:31 +08002548 ol_txrx_peer_handle tmp_peer = NULL;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002549
Jiachao Wu641760e2018-01-21 12:11:31 +08002550 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, peer_list_elem, tmp_peer) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302551 ol_txrx_info_high(
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002552 "%s: peer found for vdev id %d. deleting the peer",
2553 __func__, vdev->vdev_id);
2554 callback(callback_context, (uint8_t *)&vdev->mac_addr,
Jiachao Wu641760e2018-01-21 12:11:31 +08002555 vdev->vdev_id, peer);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002556 }
2557}
2558
Nirav Shah575282c2018-07-08 22:48:00 +05302559#ifdef WLAN_FEATURE_DSRC
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002560/**
2561 * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
2562 * @vdev: vdev handle
2563 * @ocb_set_chan: OCB channel information to be set in vdev.
2564 *
2565 * Return: NONE
2566 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002567static void ol_txrx_set_ocb_chan_info(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002568 struct ol_txrx_ocb_set_chan ocb_set_chan)
2569{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002570 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002571
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002572 vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
2573 vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
2574}
2575
2576/**
2577 * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
2578 * @vdev: vdev handle
2579 *
2580 * Return: handle to struct ol_txrx_ocb_chan_info
2581 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002582static struct ol_txrx_ocb_chan_info *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002583ol_txrx_get_ocb_chan_info(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002584{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002585 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07002586
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002587 return vdev->ocb_channel_info;
2588}
Nirav Shah575282c2018-07-08 22:48:00 +05302589#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002590
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002591/**
2592 * @brief specify the peer's authentication state
2593 * @details
2594 * Specify the peer's authentication state (none, connected, authenticated)
2595 * to allow the data SW to determine whether to filter out invalid data frames.
2596 * (In the "connected" state, where security is enabled, but authentication
2597 * has not completed, tx and rx data frames other than EAPOL or WAPI should
2598 * be discarded.)
2599 * This function is only relevant for systems in which the tx and rx filtering
2600 * are done in the host rather than in the target.
2601 *
2602 * @param peer_mac - MAC address of the peer that has changed its state
2603 * @param state - the new state of the peer
2604 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002605 * Return: QDF Status
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002606 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002607QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002608 uint8_t *peer_mac,
2609 enum ol_txrx_peer_state state)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002610{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002611 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002612 struct ol_txrx_peer_t *peer;
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002613 int peer_ref_cnt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002614
Anurag Chouhanc5548422016-02-24 18:33:27 +05302615 if (qdf_unlikely(!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302616 ol_txrx_err("Pdev is NULL");
Anurag Chouhanc5548422016-02-24 18:33:27 +05302617 qdf_assert(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302618 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002619 }
2620
Mohit Khannab7bec722017-11-10 11:43:44 -08002621 peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
2622 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002623 if (NULL == peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302624 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302625 "%s: peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
2626 __func__,
2627 peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
2628 peer_mac[4], peer_mac[5]);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302629 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002630 }
2631
2632 /* TODO: Should we send WMI command of the connection state? */
2633 /* avoid multiple auth state change. */
2634 if (peer->state == state) {
2635#ifdef TXRX_PRINT_VERBOSE_ENABLE
Poddar, Siddarth14521792017-03-14 21:19:42 +05302636 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002637 "%s: no state change, returns directly\n",
2638 __func__);
2639#endif
Mohit Khannab7bec722017-11-10 11:43:44 -08002640 peer_ref_cnt = ol_txrx_peer_release_ref
2641 (peer,
2642 PEER_DEBUG_ID_OL_INTERNAL);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302643 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002644 }
2645
Poddar, Siddarth14521792017-03-14 21:19:42 +05302646 ol_txrx_dbg("%s: change from %d to %d\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002647 __func__, peer->state, state);
2648
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002649 peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002650 ? ol_tx_filter_pass_thru
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002651 : ((state == OL_TXRX_PEER_STATE_CONN)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002652 ? ol_tx_filter_non_auth
2653 : ol_tx_filter_discard);
2654
2655 if (peer->vdev->pdev->cfg.host_addba) {
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002656 if (state == OL_TXRX_PEER_STATE_AUTH) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002657 int tid;
2658 /*
2659 * Pause all regular (non-extended) TID tx queues until
2660 * data arrives and ADDBA negotiation has completed.
2661 */
Poddar, Siddarth14521792017-03-14 21:19:42 +05302662 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002663 "%s: pause peer and unpause mgmt/non-qos\n",
2664 __func__);
2665 ol_txrx_peer_pause(peer); /* pause all tx queues */
2666 /* unpause mgmt and non-QoS tx queues */
2667 for (tid = OL_TX_NUM_QOS_TIDS;
2668 tid < OL_TX_NUM_TIDS; tid++)
2669 ol_txrx_peer_tid_unpause(peer, tid);
2670 }
2671 }
Mohit Khannab7bec722017-11-10 11:43:44 -08002672 peer_ref_cnt = ol_txrx_peer_release_ref(peer,
2673 PEER_DEBUG_ID_OL_INTERNAL);
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002674 /*
Mohit Khannab7bec722017-11-10 11:43:44 -08002675 * after ol_txrx_peer_release_ref, peer object cannot be accessed
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002676 * if the return code was 0
2677 */
Mohit Khannab04dfcd2017-02-13 18:54:35 -08002678 if (peer_ref_cnt > 0)
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002679 /*
2680 * Set the state after the Pause to avoid the race condition
2681 * with ADDBA check in tx path
2682 */
2683 peer->state = state;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302684 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002685}
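/*
 * Illustrative usage sketch (not part of the driver): a host control path
 * typically moves a peer from "conn" (only EAPOL/WAPI frames pass) to "auth"
 * once key installation completes, which switches the tx filter above from
 * ol_tx_filter_non_auth to ol_tx_filter_pass_thru. The pdev handle and MAC
 * address are assumed to come from the caller.
 *
 *	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_CONN);
 *	...complete the key handshake / install keys...
 *	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_AUTH);
 */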
2686
2687void
2688ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
2689{
2690 peer->keyinstalled = val;
2691}
2692
2693void
2694ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
2695 uint8_t *peer_mac,
2696 union ol_txrx_peer_update_param_t *param,
2697 enum ol_txrx_peer_update_select_t select)
2698{
2699 struct ol_txrx_peer_t *peer;
2700
Mohit Khannab7bec722017-11-10 11:43:44 -08002701 peer = ol_txrx_peer_find_hash_find_get_ref(vdev->pdev, peer_mac, 0, 1,
2702 PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002703 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302704 ol_txrx_dbg("%s: peer is null",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002705 __func__);
2706 return;
2707 }
2708
2709 switch (select) {
2710 case ol_txrx_peer_update_qos_capable:
2711 {
2712 /* save qos_capable here txrx peer,
2713 * when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO comes then save.
2714 */
2715 peer->qos_capable = param->qos_capable;
2716 /*
2717 * The following function call assumes that the peer has a
2718 * single ID. This is currently true, and
2719 * is expected to remain true.
2720 */
2721 htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
2722 peer->peer_ids[0],
2723 peer->qos_capable);
2724 break;
2725 }
2726 case ol_txrx_peer_update_uapsdMask:
2727 {
2728 peer->uapsd_mask = param->uapsd_mask;
2729 htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
2730 peer->peer_ids[0],
2731 peer->uapsd_mask);
2732 break;
2733 }
2734 case ol_txrx_peer_update_peer_security:
2735 {
2736 enum ol_sec_type sec_type = param->sec_type;
2737 enum htt_sec_type peer_sec_type = htt_sec_type_none;
2738
2739 switch (sec_type) {
2740 case ol_sec_type_none:
2741 peer_sec_type = htt_sec_type_none;
2742 break;
2743 case ol_sec_type_wep128:
2744 peer_sec_type = htt_sec_type_wep128;
2745 break;
2746 case ol_sec_type_wep104:
2747 peer_sec_type = htt_sec_type_wep104;
2748 break;
2749 case ol_sec_type_wep40:
2750 peer_sec_type = htt_sec_type_wep40;
2751 break;
2752 case ol_sec_type_tkip:
2753 peer_sec_type = htt_sec_type_tkip;
2754 break;
2755 case ol_sec_type_tkip_nomic:
2756 peer_sec_type = htt_sec_type_tkip_nomic;
2757 break;
2758 case ol_sec_type_aes_ccmp:
2759 peer_sec_type = htt_sec_type_aes_ccmp;
2760 break;
2761 case ol_sec_type_wapi:
2762 peer_sec_type = htt_sec_type_wapi;
2763 break;
2764 default:
2765 peer_sec_type = htt_sec_type_none;
2766 break;
2767 }
2768
2769 peer->security[txrx_sec_ucast].sec_type =
2770 peer->security[txrx_sec_mcast].sec_type =
2771 peer_sec_type;
2772
2773 break;
2774 }
2775 default:
2776 {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05302777 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002778 "ERROR: unknown param %d in %s", select,
2779 __func__);
2780 break;
2781 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002782 } /* switch */
Mohit Khannab7bec722017-11-10 11:43:44 -08002783 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002784}
2785
2786uint8_t
2787ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
2788{
2789
2790 struct ol_txrx_peer_t *peer;
Yun Parkeaea8632017-04-09 09:53:45 -07002791
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002792 peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
2793 if (peer)
2794 return peer->uapsd_mask;
2795 return 0;
2796}
2797
2798uint8_t
2799ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
2800{
2801
2802 struct ol_txrx_peer_t *peer_t =
2803 ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
2804 if (peer_t != NULL)
2805 return peer_t->qos_capable;
2806 return 0;
2807}
2808
Mohit Khannab7bec722017-11-10 11:43:44 -08002809/**
Mohit Khannab7bec722017-11-10 11:43:44 -08002810 * ol_txrx_peer_free_tids() - free tids for the peer
2811 * @peer: peer handle
2812 *
2813 * Return: None
2814 */
2815static inline void ol_txrx_peer_free_tids(ol_txrx_peer_handle peer)
2816{
2817 int i = 0;
2818 /*
2819 * 'array' is allocated in addba handler and is supposed to be
2820 * freed in delba handler. There is the case (for example, in
2821 * SSR) where delba handler is not called. Because array points
2822 * to address of 'base' by default and is reallocated in addba
2823 * handler later, only free the memory when the array does not
2824 * point to base.
2825 */
2826 for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
2827 if (peer->tids_rx_reorder[i].array !=
2828 &peer->tids_rx_reorder[i].base) {
2829 ol_txrx_dbg(
2830 "%s, delete reorder arr, tid:%d\n",
2831 __func__, i);
2832 qdf_mem_free(peer->tids_rx_reorder[i].array);
2833 ol_rx_reorder_init(&peer->tids_rx_reorder[i],
2834 (uint8_t)i);
2835 }
2836 }
2837}
2838
2839/**
2840 * ol_txrx_peer_release_ref() - release peer reference
2841 * @peer: peer handle
2842 *
2843 * Release peer reference and delete peer if refcount is 0
2844 *
wadesong9f2b1102017-12-20 22:58:35 +08002845 * Return: Resulting peer ref_cnt after this function is invoked
Mohit Khannab7bec722017-11-10 11:43:44 -08002846 */
2847int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
2848 enum peer_debug_id_type debug_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002849{
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002850 int rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002851 struct ol_txrx_vdev_t *vdev;
2852 struct ol_txrx_pdev_t *pdev;
Jingxiang Ge3badb982018-01-02 17:39:01 +08002853 bool ref_silent = false;
Jingxiang Ge190679b2018-01-30 08:56:19 +08002854 int access_list = 0;
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002855 uint32_t err_code = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002856
2857 /* preconditions */
2858 TXRX_ASSERT2(peer);
2859
2860 vdev = peer->vdev;
2861 if (NULL == vdev) {
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002862 ol_txrx_err("The vdev is not present anymore\n");
Amar Singhal7ef59092018-09-11 15:32:35 -07002863 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002864 }
2865
2866 pdev = vdev->pdev;
2867 if (NULL == pdev) {
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002868 ol_txrx_err("The pdev is not present anymore\n");
2869 err_code = 0xbad2;
2870 goto ERR_STATE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002871 }
2872
Mohit Khannab7bec722017-11-10 11:43:44 -08002873 if (debug_id >= PEER_DEBUG_ID_MAX || debug_id < 0) {
2874 ol_txrx_err("incorrect debug_id %d ", debug_id);
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002875 err_code = 0xbad3;
2876 goto ERR_STATE;
Mohit Khannab7bec722017-11-10 11:43:44 -08002877 }
2878
Jingxiang Ge3badb982018-01-02 17:39:01 +08002879 if (debug_id == PEER_DEBUG_ID_OL_RX_THREAD)
2880 ref_silent = true;
2881
2882 if (!ref_silent)
2883 wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
2884 DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07002885 peer, 0xdead,
Jingxiang Ge3badb982018-01-02 17:39:01 +08002886 qdf_atomic_read(&peer->ref_cnt));
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002887
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002888
2889 /*
2890 * Hold the lock all the way from checking if the peer ref count
2891 * is zero until the peer references are removed from the hash
2892 * table and vdev list (if the peer ref count is zero).
2893 * This protects against a new HL tx operation starting to use the
2894 * peer object just after this function concludes it's done being used.
2895 * Furthermore, the lock needs to be held while checking whether the
2896 * vdev's list of peers is empty, to make sure that list is not modified
2897 * concurrently with the empty check.
2898 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302899 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07002900
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002901 /*
2902 * Check for the reference count before deleting the peer
2903 * as we have seen cases where this function is re-entered,
2904 * leading to a deadlock.
2905 * (A double-free should never happen, so assert if it does.)
2906 */
2907 rc = qdf_atomic_read(&(peer->ref_cnt));
2908
2909 if (rc == 0) {
2910 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2911 ol_txrx_err("The Peer is not present anymore\n");
2912 qdf_assert(0);
2913 return -EACCES;
2914 }
2915 /*
2916 * now decrement rc; this will be the return code.
2917 * 0 : peer deleted
2918 * >0: peer ref removed, but still has other references
2919 * <0: sanity failed - no changes to the state of the peer
2920 */
2921 rc--;
2922
Mohit Khannab7bec722017-11-10 11:43:44 -08002923 if (!qdf_atomic_read(&peer->access_list[debug_id])) {
2924 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
jitiphil8ad8a6f2018-03-01 23:45:05 +05302925 ol_txrx_err("peer %pK ref was not taken by %d",
Mohit Khannab7bec722017-11-10 11:43:44 -08002926 peer, debug_id);
2927 ol_txrx_dump_peer_access_list(peer);
2928 QDF_BUG(0);
2929 return -EACCES;
2930 }
Mohit Khannab7bec722017-11-10 11:43:44 -08002931 qdf_atomic_dec(&peer->access_list[debug_id]);
2932
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07002933 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
Mohit Khannab7bec722017-11-10 11:43:44 -08002934 u16 peer_id;
Deepak Dhamdheref918d422017-07-06 12:56:29 -07002935 wlan_roam_debug_log(vdev->vdev_id,
2936 DEBUG_DELETING_PEER_OBJ,
2937 DEBUG_INVALID_PEER_ID,
2938 &peer->mac_addr.raw, peer, 0,
2939 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002940 peer_id = peer->local_id;
2941 /* remove the reference to the peer from the hash table */
2942 ol_txrx_peer_find_hash_remove(pdev, peer);
2943
2944 /* remove the peer from its parent vdev's list */
2945 TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
2946
2947 /* cleanup the Rx reorder queues for this peer */
2948 ol_rx_peer_cleanup(vdev, peer);
2949
Jingxiang Ge3badb982018-01-02 17:39:01 +08002950 qdf_spinlock_destroy(&peer->peer_info_lock);
2951 qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
2952
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002953 /* peer is removed from peer_list */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302954 qdf_atomic_set(&peer->delete_in_progress, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002955
2956 /*
2957 * Set wait_delete_comp event if the current peer id matches
2958 * with registered peer id.
2959 */
2960 if (peer_id == vdev->wait_on_peer_id) {
Anurag Chouhance0dc992016-02-16 18:18:03 +05302961 qdf_event_set(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002962 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2963 }
2964
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07002965 qdf_timer_sync_cancel(&peer->peer_unmap_timer);
2966 qdf_timer_free(&peer->peer_unmap_timer);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08002967
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002968 /* check whether the parent vdev has no peers left */
2969 if (TAILQ_EMPTY(&vdev->peer_list)) {
2970 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002971 * Check if the parent vdev was waiting for its peers
2972 * to be deleted, in order for it to be deleted too.
2973 */
2974 if (vdev->delete.pending) {
2975 ol_txrx_vdev_delete_cb vdev_delete_cb =
2976 vdev->delete.callback;
2977 void *vdev_delete_context =
2978 vdev->delete.context;
Himanshu Agarwal31f28562015-12-11 10:35:10 +05302979 /*
2980 * Now that there are no references to the peer,
2981 * we can release the peer reference lock.
2982 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302983 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05302984
gbian016a42e2017-03-01 18:49:11 +08002985 /*
Yun Parkeaea8632017-04-09 09:53:45 -07002986 * ol_tx_desc_free might access stale contents
2987 * of the vdev referred to by a tx desc, since
2988 * the vdev may be detached asynchronously by
2989 * another thread.
2990 *
2991 * So, when this vdev is detached, go through the
2992 * tx desc pool and set the corresponding tx descs'
2993 * vdev to NULL; ol_tx_desc_free checks for a NULL
2994 * vdev to avoid a crash.
2995 */
gbian016a42e2017-03-01 18:49:11 +08002996 ol_txrx_tx_desc_reset_vdev(vdev);
Poddar, Siddarth14521792017-03-14 21:19:42 +05302997 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002998 "%s: deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) - its last peer is done",
Yun Parkeaea8632017-04-09 09:53:45 -07002999 __func__, vdev,
3000 vdev->mac_addr.raw[0],
3001 vdev->mac_addr.raw[1],
3002 vdev->mac_addr.raw[2],
3003 vdev->mac_addr.raw[3],
3004 vdev->mac_addr.raw[4],
3005 vdev->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003006 /* all peers are gone, go ahead and delete it */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303007 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003008 if (vdev_delete_cb)
3009 vdev_delete_cb(vdev_delete_context);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303010 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303011 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003012 }
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303013 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303014 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303015 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003016
jitiphil8ad8a6f2018-03-01 23:45:05 +05303017 ol_txrx_info_high("[%d][%d]: Deleting peer %pK ref_cnt -> %d %s",
Mohit Khannab7bec722017-11-10 11:43:44 -08003018 debug_id,
3019 qdf_atomic_read(&peer->access_list[debug_id]),
3020 peer, rc,
3021 qdf_atomic_read(&peer->fw_create_pending)
3022 == 1 ?
3023 "(No Maps received)" : "");
Mohit Khanna8ee37c62017-08-07 17:15:20 -07003024
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303025 ol_txrx_peer_tx_queue_free(pdev, peer);
3026
Deepak Dhamdhereb0d2dda2017-04-03 01:01:50 -07003027 /* Remove mappings from peer_id to peer object */
3028 ol_txrx_peer_clear_map_peer(pdev, peer);
3029
wadesong9f2b1102017-12-20 22:58:35 +08003030 /* Remove peer pointer from local peer ID map */
3031 ol_txrx_local_peer_id_free(pdev, peer);
3032
Mohit Khannab7bec722017-11-10 11:43:44 -08003033 ol_txrx_peer_free_tids(peer);
3034
3035 ol_txrx_dump_peer_access_list(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003036
Alok Kumar8e178242018-06-15 12:49:57 +05303037 qdf_mem_free(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003038 } else {
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003039 access_list = qdf_atomic_read(&peer->access_list[debug_id]);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303040 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Jingxiang Ge3badb982018-01-02 17:39:01 +08003041 if (!ref_silent)
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003042 ol_txrx_info_high("[%d][%d]: ref delete peer %pK ref_cnt -> %d",
3043 debug_id,
3044 access_list,
3045 peer, rc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003046 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003047 return rc;
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003048ERR_STATE:
3049 wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
3050 DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
3051 peer, err_code, qdf_atomic_read(&peer->ref_cnt));
3052 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003053}
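/*
 * Illustrative usage sketch (not part of the driver): every reference taken
 * with ol_txrx_peer_find_hash_find_get_ref() must be balanced by an
 * ol_txrx_peer_release_ref() with the same debug ID, as the state-update and
 * peer-update paths above do.
 *
 *	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
 *						   PEER_DEBUG_ID_OL_INTERNAL);
 *	if (!peer)
 *		return QDF_STATUS_E_INVAL;
 *	...use the peer; it cannot be freed while this reference is held...
 *	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 */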
3054
Dhanashri Atre12a08392016-02-17 13:10:34 -08003055/**
Mohit Khanna0696eef2016-04-14 16:14:08 -07003056 * ol_txrx_clear_peer_internal() - ol internal function to clear peer
3057 * @peer: pointer to ol txrx peer structure
3058 *
3059 * Return: QDF Status
3060 */
3061static QDF_STATUS
3062ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
3063{
3064 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3065 /* Drop pending Rx frames in CDS */
3066 if (sched_ctx)
3067 cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3068
3069 /* Purge the cached rx frame queue */
3070 ol_txrx_flush_rx_frames(peer, 1);
3071
3072 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003073 peer->state = OL_TXRX_PEER_STATE_DISC;
3074 qdf_spin_unlock_bh(&peer->peer_info_lock);
3075
3076 return QDF_STATUS_SUCCESS;
3077}
3078
3079/**
3080 * ol_txrx_clear_peer() - clear peer
3081 * @sta_id: sta id
3082 *
3083 * Return: QDF Status
3084 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003085static QDF_STATUS ol_txrx_clear_peer(struct cdp_pdev *ppdev, uint8_t sta_id)
Mohit Khanna0696eef2016-04-14 16:14:08 -07003086{
3087 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003088 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Zhu Jianmin99523042018-06-06 20:01:44 +08003089 QDF_STATUS status;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003090
3091 if (!pdev) {
Zhu Jianmin99523042018-06-06 20:01:44 +08003092 ol_txrx_err("Unable to find pdev!");
Mohit Khanna0696eef2016-04-14 16:14:08 -07003093 return QDF_STATUS_E_FAILURE;
3094 }
3095
3096 if (sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303097 ol_txrx_err("Invalid sta id %d", sta_id);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003098 return QDF_STATUS_E_INVAL;
3099 }
3100
Zhu Jianmin99523042018-06-06 20:01:44 +08003101 peer = ol_txrx_peer_get_ref_by_local_id(ppdev, sta_id,
3102 PEER_DEBUG_ID_OL_INTERNAL);
Kabilan Kannanfa163982018-01-30 12:03:41 -08003103
3104 /* Return success if the peer has already been cleared by
3105 * the data path via the peer detach function.
3106 */
Mohit Khanna0696eef2016-04-14 16:14:08 -07003107 if (!peer)
Kabilan Kannanfa163982018-01-30 12:03:41 -08003108 return QDF_STATUS_SUCCESS;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003109
Zhu Jianmin99523042018-06-06 20:01:44 +08003110 ol_txrx_dbg("Clear peer rx frames: " QDF_MAC_ADDR_STR,
3111 QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw));
3112 ol_txrx_clear_peer_internal(peer);
3113 status = ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003114
Zhu Jianmin99523042018-06-06 20:01:44 +08003115 return status;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003116}
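/*
 * Illustrative usage sketch (not part of the driver): an upper layer that is
 * about to reuse a station slot could flush any rx frames still cached for
 * it. The sta_id is assumed to come from the caller's station table.
 *
 *	if (ol_txrx_clear_peer(ppdev, sta_id) != QDF_STATUS_SUCCESS)
 *		ol_txrx_err("failed to clear peer for sta %d", sta_id);
 */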
3117
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003118void peer_unmap_timer_work_function(void *param)
3119{
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003120 WMA_LOGI("Enter: %s", __func__);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003121 /* Added for debugging only */
Naveen Rawat17c42a82018-02-01 19:18:27 -08003122 ol_txrx_dump_peer_access_list(param);
Deepak Dhamdheref918d422017-07-06 12:56:29 -07003123 wlan_roam_debug_dump_table();
Anurag Chouhan4085ff72017-10-05 18:09:56 +05303124 cds_trigger_recovery(QDF_PEER_UNMAP_TIMEDOUT);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003125}
3126
Mohit Khanna0696eef2016-04-14 16:14:08 -07003127/**
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003128 * peer_unmap_timer_handler() - peer unmap timer function
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003129 * @data: peer object pointer
3130 *
3131 * Return: none
3132 */
3133void peer_unmap_timer_handler(void *data)
3134{
3135 ol_txrx_peer_handle peer = (ol_txrx_peer_handle)data;
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003136 ol_txrx_pdev_handle txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003137
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003138 ol_txrx_err("all unmap events not received for peer %pK, ref_cnt %d",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003139 peer, qdf_atomic_read(&peer->ref_cnt));
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003140 ol_txrx_err("peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003141 peer,
3142 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3143 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3144 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Nachiket Kukadea48fd772017-07-28 18:48:57 +05303145 if (!cds_is_driver_recovering() && !cds_is_fw_down()) {
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003146 qdf_create_work(0, &txrx_pdev->peer_unmap_timer_work,
3147 peer_unmap_timer_work_function,
Naveen Rawat17c42a82018-02-01 19:18:27 -08003148 peer);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003149 qdf_sched_work(0, &txrx_pdev->peer_unmap_timer_work);
Deepak Dhamdhered42ab7c2017-04-13 19:32:16 -07003150 } else {
3151 ol_txrx_err("Recovery is in progress, ignore!");
3152 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003153}
3154
3155
3156/**
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003157 * ol_txrx_peer_detach() - Delete a peer's data object.
3158 * @ppeer - the object to detach
Naveen Rawatf4ada152017-09-05 14:56:12 -07003159 * @bitmap - bitmap indicating special handling of request.
Dhanashri Atre12a08392016-02-17 13:10:34 -08003160 *
3161 * When the host's control SW disassociates a peer, it calls
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003162 * this function to detach and delete the peer. The reference
Dhanashri Atre12a08392016-02-17 13:10:34 -08003163 * stored in the control peer object to the data peer
3164 * object (set up by a call to ol_peer_store()) is provided.
3165 *
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003166 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08003167 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003168static void ol_txrx_peer_detach(void *ppeer, uint32_t bitmap)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003169{
Leo Chang98726762016-10-28 11:07:18 -07003170 ol_txrx_peer_handle peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003171 struct ol_txrx_vdev_t *vdev = peer->vdev;
3172
3173 /* redirect peer's rx delivery function to point to a discard func */
3174 peer->rx_opt_proc = ol_rx_discard;
3175
3176 peer->valid = 0;
3177
Mohit Khanna0696eef2016-04-14 16:14:08 -07003178 /* flush all rx packets before clearing up the peer local_id */
3179 ol_txrx_clear_peer_internal(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003180
3181 /* debug print to dump rx reorder state */
3182 /* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
3183
Abhinav Kumar50d4dc72018-06-15 16:35:50 +05303184 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003185 "%s:peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003186 __func__, peer,
3187 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3188 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3189 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003190
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303191 qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003192 if (vdev->last_real_peer == peer)
3193 vdev->last_real_peer = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303194 qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003195 htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
3196
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003197 /*
3198 * set delete_in_progress to identify that wma
3199 * is waiting for the unmap message for this peer
3200 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303201 qdf_atomic_set(&peer->delete_in_progress, 1);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003202
Lin Bai973e6922018-01-08 17:59:19 +08003203 if (!(bitmap & (1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER))) {
Naveen Rawatf4ada152017-09-05 14:56:12 -07003204 if (vdev->opmode == wlan_op_mode_sta) {
3205 qdf_mem_copy(&peer->vdev->last_peer_mac_addr,
3206 &peer->mac_addr,
3207 sizeof(union ol_txrx_align_mac_addr_t));
Abhishek Singh217d9782017-04-28 23:49:11 +05303208
Lin Bai973e6922018-01-08 17:59:19 +08003209 /*
3210 * Create a timer to track unmap events when the
3211 * sta peer gets deleted.
3212 */
Naveen Rawatf4ada152017-09-05 14:56:12 -07003213 qdf_timer_start(&peer->peer_unmap_timer,
3214 OL_TXRX_PEER_UNMAP_TIMEOUT);
Mohit Khannab7bec722017-11-10 11:43:44 -08003215 ol_txrx_info_high
3216 ("started peer_unmap_timer for peer %pK",
3217 peer);
Naveen Rawatf4ada152017-09-05 14:56:12 -07003218 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003219 }
3220
3221 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003222 * Remove the reference added during peer_attach.
3223 * The peer will still be left allocated until the
3224 * PEER_UNMAP message arrives to remove the other
3225 * reference, added by the PEER_MAP message.
3226 */
Manjunathappa Prakash1253c3d2018-08-22 15:52:14 -07003227 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003228}
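/*
 * Illustrative usage sketch (not part of the driver): callers detach a peer
 * with a bitmap describing any special handling; for example, the
 * force-delete path below uses
 *
 *	ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
 *
 * while a caller that must not start the unmap timer would set
 * CDP_PEER_DO_NOT_START_UNMAP_TIMER in the bitmap instead.
 */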
3229
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003230/**
3231 * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
Lin Bai973e6922018-01-08 17:59:19 +08003232 * @ppeer - the object to detach
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003233 *
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003234 * Detach a peer and force peer object to be removed. It is called during
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003235 * roaming scenario when the firmware has already deleted a peer.
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003236 * Remove it from the peer_id_to_object map. Peer object is actually freed
3237 * when last reference is deleted.
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003238 *
3239 * Return: None
3240 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003241static void ol_txrx_peer_detach_force_delete(void *ppeer)
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003242{
Leo Chang98726762016-10-28 11:07:18 -07003243 ol_txrx_peer_handle peer = ppeer;
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003244 ol_txrx_pdev_handle pdev = peer->vdev->pdev;
3245
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003246 ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d",
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003247 __func__, peer, qdf_atomic_read(&peer->ref_cnt));
3248
3249 /* Clear the peer_id_to_obj map entries */
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003250 ol_txrx_peer_remove_obj_map_entries(pdev, peer);
Lin Bai973e6922018-01-08 17:59:19 +08003251 ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003252}
3253
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003254/**
3255 * ol_txrx_dump_tx_desc() - dump tx desc total and free count
3256 * @txrx_pdev: Pointer to txrx pdev
3257 *
3258 * Return: none
3259 */
3260static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3261{
3262 struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003263 uint32_t total, num_free;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003264
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303265 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3266 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3267 else
3268 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003269
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003270 num_free = ol_tx_get_total_free_desc(pdev);
3271
Kapil Gupta53d9b572017-06-28 17:53:25 +05303272 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303273 "total tx credit %d num_free %d",
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003274 total, num_free);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003275
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003276}
3277
3278/**
3279 * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3280 * @timeout: timeout in ms
3281 *
3282 * Wait for tx queue to be empty, return timeout error if
3283 * queue doesn't empty before timeout occurs.
3284 *
3285 * Return:
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303286 * QDF_STATUS_SUCCESS if the queue empties,
3287 * QDF_STATUS_E_TIMEOUT in case of timeout,
3288 * QDF_STATUS_E_FAULT in case of missing handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003289 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003290static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003291{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003292 struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003293
3294 if (txrx_pdev == NULL) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303295 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003296 "%s: txrx context is null", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303297 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003298 }
3299
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003300 while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
Anurag Chouhan512c7d52016-02-19 15:49:46 +05303301 qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003302 if (timeout <= 0) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303303 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303304 "%s: tx frames are pending", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003305 ol_txrx_dump_tx_desc(txrx_pdev);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303306 return QDF_STATUS_E_TIMEOUT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003307 }
3308 timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3309 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303310 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003311}
3312
3313#ifndef QCA_WIFI_3_0_EMU
Himanshu Agarwal83a87572017-05-25 14:09:50 +05303314#define SUSPEND_DRAIN_WAIT 500
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003315#else
3316#define SUSPEND_DRAIN_WAIT 3000
3317#endif
3318
Yue Ma1e11d792016-02-26 18:58:44 -08003319#ifdef FEATURE_RUNTIME_PM
3320/**
3321 * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
3322 * @txrx_pdev: TXRX pdev context
3323 *
3324 * TXRX is ready to runtime suspend if there are no pending packets
3325 * in the tx queue.
3326 *
3327 * Return: QDF_STATUS
3328 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003329static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003330{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003331 struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07003332
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003333 if (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev))
Yue Ma1e11d792016-02-26 18:58:44 -08003334 return QDF_STATUS_E_BUSY;
3335 else
3336 return QDF_STATUS_SUCCESS;
3337}
3338
3339/**
3340 * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
3341 * @txrx_pdev: TXRX pdev context
3342 *
3343 * This is a dummy function for symmetry.
3344 *
3345 * Return: QDF_STATUS_SUCCESS
3346 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003347static QDF_STATUS ol_txrx_runtime_resume(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003348{
3349 return QDF_STATUS_SUCCESS;
3350}
3351#endif
3352
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003353/**
3354 * ol_txrx_bus_suspend() - bus suspend
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003355 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003356 *
3357 * Ensure that ol_txrx is ready for bus suspend
3358 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303359 * Return: QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003360 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003361static QDF_STATUS ol_txrx_bus_suspend(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003362{
3363 return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3364}
3365
3366/**
3367 * ol_txrx_bus_resume() - bus resume
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003368 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003369 *
3370 * Dummy function for symmetry
3371 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303372 * Return: QDF_STATUS_SUCCESS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003373 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003374static QDF_STATUS ol_txrx_bus_resume(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003375{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303376 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003377}
3378
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003379/**
3380 * ol_txrx_get_tx_pending - Get the number of pending transmit
3381 * frames that are awaiting completion.
3382 *
3383 * @pdev - the data physical device object
3384 * Mainly used in clean up path to make sure all buffers have been freed
3385 *
3386 * Return: count of pending frames
3387 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003388int ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003389{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003390 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003391 uint32_t total;
3392
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303393 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3394 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3395 else
3396 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003397
Nirav Shah55b45a02016-01-21 10:00:16 +05303398 return total - ol_tx_get_total_free_desc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003399}
3400
3401void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
3402{
3403 ol_tx_desc_list tx_descs;
Yun Parkeaea8632017-04-09 09:53:45 -07003404 /*
3405 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303406 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Yun Parkeaea8632017-04-09 09:53:45 -07003407 * which is the same as the normal data send completion path
3408 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003409 htt_tx_pending_discard(pdev_handle->htt_pdev);
3410
3411 TAILQ_INIT(&tx_descs);
3412 ol_tx_queue_discard(pdev_handle, true, &tx_descs);
3413 /* Discard Frames in Discard List */
3414 ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
3415
3416 ol_tx_discard_target_frms(pdev_handle);
3417}
3418
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003419static inline
3420uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
3421{
3422 return (uint64_t) ((size_t) req);
3423}
3424
3425static inline
3426struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
3427{
3428 return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
3429}
3430
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003431#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003432void
3433ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
3434 uint8_t cfg_stats_type, uint32_t cfg_val)
3435{
jitiphil335d2412018-06-07 22:49:24 +05303436 uint8_t dummy_cookie = 0;
Yun Parkeaea8632017-04-09 09:53:45 -07003437
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003438 htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
3439 0 /* reset mask */,
3440 cfg_stats_type, cfg_val, dummy_cookie);
3441}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003442#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003443
jitiphil335d2412018-06-07 22:49:24 +05303444/**
3445 * ol_txrx_fw_stats_desc_pool_init() - Initialize the fw stats descriptor pool
3446 * @pdev: handle to ol txrx pdev
3447 * @pool_size: Size of fw stats descriptor pool
3448 *
3449 * Return: 0 for success, error code on failure.
3450 */
3451int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
3452 uint8_t pool_size)
3453{
3454 int i;
3455
3456 if (!pdev) {
3457 ol_txrx_err("%s: pdev is NULL", __func__);
3458 return -EINVAL;
3459 }
3460 pdev->ol_txrx_fw_stats_desc_pool.pool = qdf_mem_malloc(pool_size *
3461 sizeof(struct ol_txrx_fw_stats_desc_elem_t));
3462 if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
3463 ol_txrx_err("%s: failed to allocate desc pool", __func__);
3464 return -ENOMEM;
3465 }
3466 pdev->ol_txrx_fw_stats_desc_pool.freelist =
3467 &pdev->ol_txrx_fw_stats_desc_pool.pool[0];
3468 pdev->ol_txrx_fw_stats_desc_pool.pool_size = pool_size;
3469
3470 for (i = 0; i < (pool_size - 1); i++) {
3471 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
3472 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
3473 pdev->ol_txrx_fw_stats_desc_pool.pool[i].next =
3474 &pdev->ol_txrx_fw_stats_desc_pool.pool[i + 1];
3475 }
3476 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
3477 pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
3478 pdev->ol_txrx_fw_stats_desc_pool.pool[i].next = NULL;
3479 qdf_spinlock_create(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3480 qdf_atomic_init(&pdev->ol_txrx_fw_stats_desc_pool.initialized);
3481 qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 1);
3482 return 0;
3483}
3484
3485/**
3486 * ol_txrx_fw_stats_desc_pool_deinit() - Deinitialize the
3487 * fw stats descriptor pool
3488 * @pdev: handle to ol txrx pdev
3489 *
3490 * Return: None
3491 */
3492void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev)
3493{
jitiphil335d2412018-06-07 22:49:24 +05303494 if (!pdev) {
3495 ol_txrx_err("%s: pdev is NULL", __func__);
3496 return;
3497 }
3498 if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
3499 ol_txrx_err("%s: Pool is not initialized", __func__);
3500 return;
3501 }
3502 if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
3503 ol_txrx_err("%s: Pool is not allocated", __func__);
3504 return;
3505 }
3506 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3507 qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 0);
jitiphil335d2412018-06-07 22:49:24 +05303508 qdf_mem_free(pdev->ol_txrx_fw_stats_desc_pool.pool);
3509 pdev->ol_txrx_fw_stats_desc_pool.pool = NULL;
3510
3511 pdev->ol_txrx_fw_stats_desc_pool.freelist = NULL;
3512 pdev->ol_txrx_fw_stats_desc_pool.pool_size = 0;
3513 qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3514}
3515
3516/**
3517 * ol_txrx_fw_stats_desc_alloc() - Get fw stats descriptor from fw stats
3518 * free descriptor pool
3519 * @pdev: handle to ol txrx pdev
3520 *
3521 * Return: pointer to fw stats descriptor, NULL on failure
3522 */
3523struct ol_txrx_fw_stats_desc_t
3524 *ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev)
3525{
3526 struct ol_txrx_fw_stats_desc_t *desc = NULL;
3527
3528 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3529 if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
3530 qdf_spin_unlock_bh(&pdev->
3531 ol_txrx_fw_stats_desc_pool.pool_lock);
3532 ol_txrx_err("%s: Pool deinitialized", __func__);
3533 return NULL;
3534 }
3535 if (pdev->ol_txrx_fw_stats_desc_pool.freelist) {
3536 desc = &pdev->ol_txrx_fw_stats_desc_pool.freelist->desc;
3537 pdev->ol_txrx_fw_stats_desc_pool.freelist =
3538 pdev->ol_txrx_fw_stats_desc_pool.freelist->next;
3539 }
3540 qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3541
3542 if (desc)
3543 ol_txrx_dbg("%s: desc_id %d allocated",
3544 __func__, desc->desc_id);
3545 else
3546 ol_txrx_err("%s: fw stats descriptors are exhausted", __func__);
3547
3548 return desc;
3549}
3550
3551/**
3552 * ol_txrx_fw_stats_desc_get_req() - Return the request attached to a fw
3553 * stats descriptor and put the descriptor back into the free pool
3554 * @pdev: handle to ol txrx pdev
3555 * @desc_id: ID of the fw stats descriptor to release
3556 *
3557 * Return: pointer to request
3558 */
3559struct ol_txrx_stats_req_internal
3560 *ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
3561 unsigned char desc_id)
3562{
3563 struct ol_txrx_fw_stats_desc_elem_t *desc_elem;
3564 struct ol_txrx_stats_req_internal *req;
3565
3566 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3567 if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
3568 qdf_spin_unlock_bh(&pdev->
3569 ol_txrx_fw_stats_desc_pool.pool_lock);
3570 ol_txrx_err("%s: Desc ID %u Pool deinitialized",
3571 __func__, desc_id);
3572 return NULL;
3573 }
3574 desc_elem = &pdev->ol_txrx_fw_stats_desc_pool.pool[desc_id];
3575 req = desc_elem->desc.req;
3576 desc_elem->desc.req = NULL;
3577 desc_elem->next =
3578 pdev->ol_txrx_fw_stats_desc_pool.freelist;
3579 pdev->ol_txrx_fw_stats_desc_pool.freelist = desc_elem;
3580 qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3581 return req;
3582}
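/*
 * Illustrative sketch of the descriptor life cycle implied above (not part of
 * the driver): the requester allocates a descriptor, attaches the request,
 * and uses desc_id as the cookie sent to the firmware; the stats-confirmation
 * path later exchanges that cookie for the original request, which also puts
 * the descriptor back on the free list.
 *
 *	desc = ol_txrx_fw_stats_desc_alloc(pdev);
 *	if (!desc)
 *		return A_ERROR;
 *	desc->req = non_volatile_req;
 *	cookie = desc->desc_id;
 *	...send the stats request to the target with this cookie...
 *
 *	...later, in the stats confirmation handler...
 *	req = ol_txrx_fw_stats_desc_get_req(pdev, cookie);
 */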
3583
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003584static A_STATUS
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003585ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
Dhanashri Atre52f71332016-08-22 12:12:36 -07003586 bool per_vdev, bool response_expected)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003587{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003588 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003589 struct ol_txrx_pdev_t *pdev = vdev->pdev;
jitiphil335d2412018-06-07 22:49:24 +05303590 uint8_t cookie = FW_STATS_DESC_POOL_SIZE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003591 struct ol_txrx_stats_req_internal *non_volatile_req;
jitiphil335d2412018-06-07 22:49:24 +05303592 struct ol_txrx_fw_stats_desc_t *desc = NULL;
3593 struct ol_txrx_fw_stats_desc_elem_t *elem = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003594
3595 if (!pdev ||
3596 req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
3597 req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
3598 return A_ERROR;
3599 }
3600
3601 /*
3602 * Allocate a non-transient stats request object.
3603 * (The one provided as an argument is likely allocated on the stack.)
3604 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303605 non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003606 if (!non_volatile_req)
3607 return A_NO_MEMORY;
3608
3609 /* copy the caller's specifications */
3610 non_volatile_req->base = *req;
3611 non_volatile_req->serviced = 0;
3612 non_volatile_req->offset = 0;
tfyu9fcabd72017-09-26 17:46:48 +08003613 if (response_expected) {
jitiphil335d2412018-06-07 22:49:24 +05303614 desc = ol_txrx_fw_stats_desc_alloc(pdev);
3615 if (!desc) {
3616 qdf_mem_free(non_volatile_req);
3617 return A_ERROR;
3618 }
3619
3620 /* use the desc id as the cookie */
3621 cookie = desc->desc_id;
3622 desc->req = non_volatile_req;
tfyu9fcabd72017-09-26 17:46:48 +08003623 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3624 TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
3625 pdev->req_list_depth++;
3626 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
3627 }
3628
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003629 if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
3630 req->stats_type_upload_mask,
3631 req->stats_type_reset_mask,
3632 HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
3633 cookie)) {
tfyu9fcabd72017-09-26 17:46:48 +08003634 if (response_expected) {
3635 qdf_spin_lock_bh(&pdev->req_list_spinlock);
jitiphil335d2412018-06-07 22:49:24 +05303636 TAILQ_REMOVE(&pdev->req_list, non_volatile_req,
3637 req_list_elem);
tfyu9fcabd72017-09-26 17:46:48 +08003638 pdev->req_list_depth--;
3639 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
jitiphil335d2412018-06-07 22:49:24 +05303640 if (desc) {
3641 qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.
3642 pool_lock);
3643 desc->req = NULL;
3644 elem = container_of(desc,
3645 struct ol_txrx_fw_stats_desc_elem_t,
3646 desc);
3647 elem->next =
3648 pdev->ol_txrx_fw_stats_desc_pool.freelist;
3649 pdev->ol_txrx_fw_stats_desc_pool.freelist = elem;
3650 qdf_spin_unlock_bh(&pdev->
3651 ol_txrx_fw_stats_desc_pool.
3652 pool_lock);
3653 }
tfyu9fcabd72017-09-26 17:46:48 +08003654 }
3655
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303656 qdf_mem_free(non_volatile_req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003657 return A_ERROR;
3658 }
3659
	if (!response_expected)
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303661 qdf_mem_free(non_volatile_req);
Nirav Shahd2310422016-01-21 18:58:06 +05303662
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003663 return A_OK;
3664}
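
/*
 * Illustrative sketch only (not part of the driver): roughly how a caller in
 * this file could drive ol_txrx_fw_stats_get().  The 'pvdev' handle and the
 * exact bit layout of the HTT stats masks are assumptions here; only fields
 * that this file itself consumes (masks, print, copy, callback) are shown.
 *
 *	struct ol_txrx_stats_req req = {0};
 *
 *	req.stats_type_upload_mask = 1;	// one bit per HTT_DBG_STATS_* type
 *	req.print.verbose = 1;		// have the handler print each record
 *	if (ol_txrx_fw_stats_get(pvdev, &req, false, true) != A_OK)
 *		return;			// request could not be sent to target
 */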
Dhanashri Atre12a08392016-02-17 13:10:34 -08003665
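/**
 * ol_txrx_fw_stats_handler() - process a fw stats message from the target
 * @pdev: handle to ol txrx pdev
 * @cookie: fw stats descriptor id that was sent with the request
 * @stats_info_list: series of HTT stats records to parse
 *
 * Maps the cookie back to the pending request, then walks the stats records,
 * optionally printing them and/or copying them into the caller's buffer
 * (bounded by copy.byte_limit) and invoking the request callback.  The
 * request is removed from req_list and freed once the series is complete.
 */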
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003666void
3667ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
jitiphil335d2412018-06-07 22:49:24 +05303668 uint8_t cookie, uint8_t *stats_info_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003669{
3670 enum htt_dbg_stats_type type;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07003671 enum htt_cmn_dbg_stats_type cmn_type = HTT_DBG_CMN_NUM_STATS_INVALID;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003672 enum htt_dbg_stats_status status;
3673 int length;
3674 uint8_t *stats_data;
tfyu9fcabd72017-09-26 17:46:48 +08003675 struct ol_txrx_stats_req_internal *req, *tmp;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003676 int more = 0;
tfyu9fcabd72017-09-26 17:46:48 +08003677 int found = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003678
jitiphil335d2412018-06-07 22:49:24 +05303679 if (cookie >= FW_STATS_DESC_POOL_SIZE) {
3680 ol_txrx_err("%s: Cookie is not valid", __func__);
3681 return;
3682 }
3683 req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie);
3684 if (!req) {
3685 ol_txrx_err("%s: Request not retrieved for cookie %u", __func__,
3686 (uint8_t)cookie);
3687 return;
3688 }
tfyu9fcabd72017-09-26 17:46:48 +08003689 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3690 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
3691 if (req == tmp) {
3692 found = 1;
3693 break;
3694 }
3695 }
3696 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
3697
3698 if (!found) {
3699 ol_txrx_err(
Alok Kumarbf47b992017-10-27 16:30:32 +05303700 "req(%pK) from firmware can't be found in the list\n", req);
tfyu9fcabd72017-09-26 17:46:48 +08003701 return;
3702 }
3703
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003704 do {
3705 htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
3706 &length, &stats_data);
3707 if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
3708 break;
3709 if (status == HTT_DBG_STATS_STATUS_PRESENT ||
3710 status == HTT_DBG_STATS_STATUS_PARTIAL) {
3711 uint8_t *buf;
3712 int bytes = 0;
3713
3714 if (status == HTT_DBG_STATS_STATUS_PARTIAL)
3715 more = 1;
3716 if (req->base.print.verbose || req->base.print.concise)
3717 /* provide the header along with the data */
3718 htt_t2h_stats_print(stats_info_list,
3719 req->base.print.concise);
3720
3721 switch (type) {
3722 case HTT_DBG_STATS_WAL_PDEV_TXRX:
3723 bytes = sizeof(struct wlan_dbg_stats);
3724 if (req->base.copy.buf) {
3725 int lmt;
3726
3727 lmt = sizeof(struct wlan_dbg_stats);
3728 if (req->base.copy.byte_limit < lmt)
3729 lmt = req->base.copy.byte_limit;
3730 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303731 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003732 }
3733 break;
3734 case HTT_DBG_STATS_RX_REORDER:
3735 bytes = sizeof(struct rx_reorder_stats);
3736 if (req->base.copy.buf) {
3737 int lmt;
3738
3739 lmt = sizeof(struct rx_reorder_stats);
3740 if (req->base.copy.byte_limit < lmt)
3741 lmt = req->base.copy.byte_limit;
3742 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303743 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003744 }
3745 break;
3746 case HTT_DBG_STATS_RX_RATE_INFO:
3747 bytes = sizeof(wlan_dbg_rx_rate_info_t);
3748 if (req->base.copy.buf) {
3749 int lmt;
3750
3751 lmt = sizeof(wlan_dbg_rx_rate_info_t);
3752 if (req->base.copy.byte_limit < lmt)
3753 lmt = req->base.copy.byte_limit;
3754 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303755 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003756 }
3757 break;
3758
3759 case HTT_DBG_STATS_TX_RATE_INFO:
3760 bytes = sizeof(wlan_dbg_tx_rate_info_t);
3761 if (req->base.copy.buf) {
3762 int lmt;
3763
3764 lmt = sizeof(wlan_dbg_tx_rate_info_t);
3765 if (req->base.copy.byte_limit < lmt)
3766 lmt = req->base.copy.byte_limit;
3767 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303768 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003769 }
3770 break;
3771
3772 case HTT_DBG_STATS_TX_PPDU_LOG:
3773 bytes = 0;
3774 /* TO DO: specify how many bytes are present */
3775 /* TO DO: add copying to the requestor's buf */
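			/* no break: control falls through to the next case */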
3776
3777 case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07003778 bytes = sizeof(struct
3779 rx_remote_buffer_mgmt_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003780 if (req->base.copy.buf) {
3781 int limit;
3782
Yun Parkeaea8632017-04-09 09:53:45 -07003783 limit = sizeof(struct
3784 rx_remote_buffer_mgmt_stats);
3785 if (req->base.copy.byte_limit < limit)
3786 limit = req->base.copy.
3787 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003788 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303789 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003790 }
3791 break;
3792
3793 case HTT_DBG_STATS_TXBF_INFO:
3794 bytes = sizeof(struct wlan_dbg_txbf_data_stats);
3795 if (req->base.copy.buf) {
3796 int limit;
3797
Yun Parkeaea8632017-04-09 09:53:45 -07003798 limit = sizeof(struct
3799 wlan_dbg_txbf_data_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003800 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003801 limit = req->base.copy.
3802 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003803 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303804 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003805 }
3806 break;
3807
3808 case HTT_DBG_STATS_SND_INFO:
3809 bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
3810 if (req->base.copy.buf) {
3811 int limit;
3812
Yun Parkeaea8632017-04-09 09:53:45 -07003813 limit = sizeof(struct
3814 wlan_dbg_txbf_snd_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003815 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003816 limit = req->base.copy.
3817 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003818 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303819 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003820 }
3821 break;
3822
3823 case HTT_DBG_STATS_TX_SELFGEN_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07003824 bytes = sizeof(struct
3825 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003826 if (req->base.copy.buf) {
3827 int limit;
3828
Yun Parkeaea8632017-04-09 09:53:45 -07003829 limit = sizeof(struct
3830 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003831 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003832 limit = req->base.copy.
3833 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003834 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303835 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003836 }
3837 break;
3838
3839 case HTT_DBG_STATS_ERROR_INFO:
3840 bytes =
3841 sizeof(struct wlan_dbg_wifi2_error_stats);
3842 if (req->base.copy.buf) {
3843 int limit;
3844
Yun Parkeaea8632017-04-09 09:53:45 -07003845 limit = sizeof(struct
3846 wlan_dbg_wifi2_error_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003847 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003848 limit = req->base.copy.
3849 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003850 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303851 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003852 }
3853 break;
3854
3855 case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
3856 bytes =
3857 sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
3858 if (req->base.copy.buf) {
3859 int limit;
3860
3861 limit = sizeof(struct
3862 rx_txbf_musu_ndpa_pkts_stats);
3863 if (req->base.copy.byte_limit < limit)
3864 limit =
3865 req->base.copy.byte_limit;
3866 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303867 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003868 }
3869 break;
3870
3871 default:
3872 break;
3873 }
Yun Parkeaea8632017-04-09 09:53:45 -07003874 buf = req->base.copy.buf ?
3875 req->base.copy.buf : stats_data;
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07003876
3877 /* Not implemented for MCL */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003878 if (req->base.callback.fp)
3879 req->base.callback.fp(req->base.callback.ctxt,
Manjunathappa Prakash7a4ecb22018-03-28 20:08:07 -07003880 cmn_type, buf, bytes);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003881 }
3882 stats_info_list += length;
3883 } while (1);
3884
3885 if (!more) {
tfyu9fcabd72017-09-26 17:46:48 +08003886 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3887 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
3888 if (req == tmp) {
3889 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
3890 pdev->req_list_depth--;
3891 qdf_mem_free(req);
3892 break;
3893 }
3894 }
3895 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003896 }
3897}
3898
3899#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
3900int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
3901{
3902 if (debug_specs & TXRX_DBG_MASK_OBJS) {
3903#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
3904 ol_txrx_pdev_display(vdev->pdev, 0);
3905#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303906 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303907 "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003908#endif
3909 }
Yun Parkeaea8632017-04-09 09:53:45 -07003910 if (debug_specs & TXRX_DBG_MASK_STATS)
Mohit Khannaca4173b2017-09-12 21:52:19 -07003911 ol_txrx_stats_display(vdev->pdev,
3912 QDF_STATS_VERBOSITY_LEVEL_HIGH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003913 if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
3914#if defined(ENABLE_TXRX_PROT_ANALYZE)
3915 ol_txrx_prot_ans_display(vdev->pdev);
3916#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303917 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303918 "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003919#endif
3920 }
3921 if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
3922#if defined(ENABLE_RX_REORDER_TRACE)
3923 ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
3924#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303925 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303926 "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003927#endif
3928
3929 }
3930 return 0;
3931}
3932#endif
3933
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003934#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003935int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
3936 int max_subfrms_ampdu, int max_subfrms_amsdu)
3937{
3938 return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
3939 max_subfrms_ampdu, max_subfrms_amsdu);
3940}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003941#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003942
3943#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
3944void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
3945{
3946 struct ol_txrx_vdev_t *vdev;
3947
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303948 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003949 "%*s%s:\n", indent, " ", "txrx pdev");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303950 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003951 "%*spdev object: %pK", indent + 4, " ", pdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303952 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003953 "%*svdev list:", indent + 4, " ");
3954 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303955 ol_txrx_vdev_display(vdev, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003956 }
3957 ol_txrx_peer_find_display(pdev, indent + 4);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303958 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003959 "%*stx desc pool: %d elems @ %pK", indent + 4, " ",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003960 pdev->tx_desc.pool_size, pdev->tx_desc.array);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303961 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003962 htt_display(pdev->htt_pdev, indent);
3963}
3964
3965void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
3966{
3967 struct ol_txrx_peer_t *peer;
3968
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303969 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003970 "%*stxrx vdev: %pK\n", indent, " ", vdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303971 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003972 "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303973 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003974 "%*sMAC addr: %d:%d:%d:%d:%d:%d",
3975 indent + 4, " ",
3976 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
3977 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
3978 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303979 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003980 "%*speer list:", indent + 4, " ");
3981 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303982 ol_txrx_peer_display(peer, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003983 }
3984}
3985
3986void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
3987{
3988 int i;
3989
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303990 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003991 "%*stxrx peer: %pK", indent, " ", peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003992 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
3993 if (peer->peer_ids[i] != HTT_INVALID_PEER) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303994 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003995 "%*sID: %d", indent + 4, " ",
3996 peer->peer_ids[i]);
3997 }
3998 }
3999}
4000#endif /* TXRX_DEBUG_LEVEL */
4001
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004002/**
 * ol_txrx_stats() - fill a caller buffer with ol layer (ll_pause) stats
4004 * @vdev_id: vdev_id
4005 * @buffer: pointer to buffer
4006 * @buf_len: length of the buffer
4007 *
4008 * Return: length of string
4009 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004010static int
Yun Parkeaea8632017-04-09 09:53:45 -07004011ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004012{
4013 uint32_t len = 0;
4014
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004015 struct ol_txrx_vdev_t *vdev =
4016 (struct ol_txrx_vdev_t *)
4017 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004018
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004019 if (!vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304020 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304021 "%s: vdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004022 snprintf(buffer, buf_len, "vdev not found");
4023 return len;
4024 }
4025
4026 len = scnprintf(buffer, buf_len,
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004027 "\n\nTXRX stats:\nllQueue State : %s\npause %u unpause %u\noverflow %u\nllQueue timer state : %s",
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304028 ((vdev->ll_pause.is_q_paused == false) ?
4029 "UNPAUSED" : "PAUSED"),
4030 vdev->ll_pause.q_pause_cnt,
4031 vdev->ll_pause.q_unpause_cnt,
4032 vdev->ll_pause.q_overflow_cnt,
4033 ((vdev->ll_pause.is_q_timer_on == false)
4034 ? "NOT-RUNNING" : "RUNNING"));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004035 return len;
4036}
4037
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004038#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
4039/**
4040 * ol_txrx_disp_peer_cached_bufq_stats() - display peer cached_bufq stats
4041 * @peer: peer pointer
4042 *
4043 * Return: None
4044 */
4045static void ol_txrx_disp_peer_cached_bufq_stats(struct ol_txrx_peer_t *peer)
4046{
Nirav Shahe6194ac2018-07-13 11:04:41 +05304047 txrx_nofl_info("cached_bufq: curr %d drops %d hwm %d whatifs %d thresh %d",
4048 peer->bufq_info.curr,
4049 peer->bufq_info.dropped,
4050 peer->bufq_info.high_water_mark,
4051 peer->bufq_info.qdepth_no_thresh,
4052 peer->bufq_info.thresh);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004053}
4054
4055/**
4056 * ol_txrx_disp_peer_stats() - display peer stats
4057 * @pdev: pdev pointer
4058 *
4059 * Return: None
4060 */
4061static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
{
	int i;
4063 struct ol_txrx_peer_t *peer;
4064 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
4065
4066 if (osc && hif_is_load_or_unload_in_progress(HIF_GET_SOFTC(osc)))
4067 return;
4068
4069 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
Manjunathappa Prakasha4272ab2018-09-17 11:39:44 -07004070 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004071 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4072 peer = pdev->local_peer_ids.map[i];
Frank Liu4362e462018-01-16 11:51:55 +08004073 if (peer) {
Mohit Khannab7bec722017-11-10 11:43:44 -08004074 ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
Frank Liu4362e462018-01-16 11:51:55 +08004075 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004076 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Manjunathappa Prakasha4272ab2018-09-17 11:39:44 -07004077 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004078
4079 if (peer) {
Nirav Shahe6194ac2018-07-13 11:04:41 +05304080 txrx_nofl_info("stats: peer 0x%pK local peer id %d",
4081 peer, i);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004082 ol_txrx_disp_peer_cached_bufq_stats(peer);
Mohit Khannab7bec722017-11-10 11:43:44 -08004083 ol_txrx_peer_release_ref(peer,
4084 PEER_DEBUG_ID_OL_INTERNAL);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004085 }
4086 }
4087}
4088#else
4089static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4090{
Nirav Shahe6194ac2018-07-13 11:04:41 +05304091 txrx_nofl_info("peer stats not supported w/o QCA_SUPPORT_TXRX_LOCAL_PEER_ID");
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004092}
4093#endif
4094
Mohit Khannaca4173b2017-09-12 21:52:19 -07004095void ol_txrx_stats_display(ol_txrx_pdev_handle pdev,
4096 enum qdf_stats_verbosity_level level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004097{
Mohit Khannaca4173b2017-09-12 21:52:19 -07004098 u64 tx_dropped =
4099 pdev->stats.pub.tx.dropped.download_fail.pkts
4100 + pdev->stats.pub.tx.dropped.target_discard.pkts
4101 + pdev->stats.pub.tx.dropped.no_ack.pkts
4102 + pdev->stats.pub.tx.dropped.others.pkts;
4103
4104 if (level == QDF_STATS_VERBOSITY_LEVEL_LOW) {
Nirav Shahe6194ac2018-07-13 11:04:41 +05304105 txrx_nofl_dbg("STATS |%u %u|TX: %lld tso %lld ok %lld drops(%u-%lld %u-%lld %u-%lld ?-%lld hR-%lld)|RX: %lld drops(E %lld PI %lld ME %lld) fwd(S %d F %d SF %d)|",
4106 pdev->tx_desc.num_free,
4107 pdev->tx_desc.pool_size,
4108 pdev->stats.pub.tx.from_stack.pkts,
4109 pdev->stats.pub.tx.tso.tso_pkts.pkts,
4110 pdev->stats.pub.tx.delivered.pkts,
4111 htt_tx_status_download_fail,
4112 pdev->stats.pub.tx.dropped.download_fail.pkts,
4113 htt_tx_status_discard,
4114 pdev->stats.pub.tx.dropped.
4115 target_discard.pkts,
4116 htt_tx_status_no_ack,
4117 pdev->stats.pub.tx.dropped.no_ack.pkts,
4118 pdev->stats.pub.tx.dropped.others.pkts,
4119 pdev->stats.pub.tx.dropped.host_reject.pkts,
4120 pdev->stats.pub.rx.delivered.pkts,
4121 pdev->stats.pub.rx.dropped_err.pkts,
4122 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4123 pdev->stats.pub.rx.dropped_mic_err.pkts,
4124 pdev->stats.pub.rx.intra_bss_fwd.
4125 packets_stack,
4126 pdev->stats.pub.rx.intra_bss_fwd.
4127 packets_fwd,
4128 pdev->stats.pub.rx.intra_bss_fwd.
4129 packets_stack_n_fwd);
Mohit Khannaca4173b2017-09-12 21:52:19 -07004130 return;
4131 }
4132
Nirav Shahe6194ac2018-07-13 11:04:41 +05304133 txrx_nofl_info("TX PATH Statistics:");
4134 txrx_nofl_info("sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
4135 pdev->stats.pub.tx.from_stack.pkts,
4136 pdev->stats.pub.tx.from_stack.bytes,
4137 pdev->stats.pub.tx.dropped.host_reject.pkts,
4138 pdev->stats.pub.tx.dropped.host_reject.bytes,
4139 tx_dropped,
4140 pdev->stats.pub.tx.dropped.download_fail.bytes
4141 + pdev->stats.pub.tx.dropped.target_discard.bytes
4142 + pdev->stats.pub.tx.dropped.no_ack.bytes);
4143 txrx_nofl_info("successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B) others: %lld (%lld B)",
4144 pdev->stats.pub.tx.delivered.pkts,
4145 pdev->stats.pub.tx.delivered.bytes,
4146 pdev->stats.pub.tx.dropped.download_fail.pkts,
4147 pdev->stats.pub.tx.dropped.download_fail.bytes,
4148 pdev->stats.pub.tx.dropped.target_discard.pkts,
4149 pdev->stats.pub.tx.dropped.target_discard.bytes,
4150 pdev->stats.pub.tx.dropped.no_ack.pkts,
4151 pdev->stats.pub.tx.dropped.no_ack.bytes,
4152 pdev->stats.pub.tx.dropped.others.pkts,
4153 pdev->stats.pub.tx.dropped.others.bytes);
4154 txrx_nofl_info("Tx completions per HTT message:\n"
4155 "Single Packet %d\n"
4156 " 2-10 Packets %d\n"
4157 "11-20 Packets %d\n"
4158 "21-30 Packets %d\n"
4159 "31-40 Packets %d\n"
4160 "41-50 Packets %d\n"
4161 "51-60 Packets %d\n"
4162 " 60+ Packets %d\n",
4163 pdev->stats.pub.tx.comp_histogram.pkts_1,
4164 pdev->stats.pub.tx.comp_histogram.pkts_2_10,
4165 pdev->stats.pub.tx.comp_histogram.pkts_11_20,
4166 pdev->stats.pub.tx.comp_histogram.pkts_21_30,
4167 pdev->stats.pub.tx.comp_histogram.pkts_31_40,
4168 pdev->stats.pub.tx.comp_histogram.pkts_41_50,
4169 pdev->stats.pub.tx.comp_histogram.pkts_51_60,
4170 pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
Nirav Shahda008342016-05-17 18:50:40 +05304171
Nirav Shahe6194ac2018-07-13 11:04:41 +05304172 txrx_nofl_info("RX PATH Statistics:");
4173 txrx_nofl_info("%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
4174 "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
4175 "msdus with frag_ind: %d msdus with offload_ind: %d",
4176 pdev->stats.priv.rx.normal.ppdus,
4177 pdev->stats.priv.rx.normal.mpdus,
4178 pdev->stats.pub.rx.delivered.pkts,
4179 pdev->stats.pub.rx.delivered.bytes,
4180 pdev->stats.pub.rx.dropped_err.pkts,
4181 pdev->stats.pub.rx.dropped_err.bytes,
4182 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4183 pdev->stats.pub.rx.dropped_peer_invalid.bytes,
4184 pdev->stats.pub.rx.dropped_mic_err.pkts,
4185 pdev->stats.pub.rx.dropped_mic_err.bytes,
4186 pdev->stats.pub.rx.msdus_with_frag_ind,
4187 pdev->stats.pub.rx.msdus_with_offload_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004188
Nirav Shahe6194ac2018-07-13 11:04:41 +05304189 txrx_nofl_info(" fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
4190 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4191 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4192 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304193
Nirav Shahe6194ac2018-07-13 11:04:41 +05304194 txrx_nofl_info("packets per HTT message:\n"
4195 "Single Packet %d\n"
4196 " 2-10 Packets %d\n"
4197 "11-20 Packets %d\n"
4198 "21-30 Packets %d\n"
4199 "31-40 Packets %d\n"
4200 "41-50 Packets %d\n"
4201 "51-60 Packets %d\n"
4202 " 60+ Packets %d\n",
4203 pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4204 pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4205 pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4206 pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4207 pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4208 pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4209 pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4210 pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004211
4212 ol_txrx_disp_peer_stats(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004213}
4214
4215void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4216{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304217 qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004218}
4219
4220#if defined(ENABLE_TXRX_PROT_ANALYZE)
4221
4222void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4223{
4224 ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4225 ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4226}
4227
4228#endif /* ENABLE_TXRX_PROT_ANALYZE */
4229
4230#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
4231int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4232{
4233 return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4234 OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4235}
4236#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4237
4238#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
4239A_STATUS
4240ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4241 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4242{
Anurag Chouhanc5548422016-02-24 18:33:27 +05304243 qdf_assert(pdev && peer && stats);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304244 qdf_spin_lock_bh(&pdev->peer_stat_mutex);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304245 qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304246 qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004247 return A_OK;
4248}
4249#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4250
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004251static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *pvdev, bool val)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004252{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004253 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004254
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004255 if (NULL == vdev)
4256 return;
4257
4258 vdev->disable_intrabss_fwd = val;
4259}
4260
Nirav Shahc657ef52016-07-26 14:22:38 +05304261/**
4262 * ol_txrx_update_mac_id() - update mac_id for vdev
4263 * @vdev_id: vdev id
4264 * @mac_id: mac id
4265 *
4266 * Return: none
4267 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004268static void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id)
Nirav Shahc657ef52016-07-26 14:22:38 +05304269{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004270 struct ol_txrx_vdev_t *vdev =
4271 (struct ol_txrx_vdev_t *)
4272 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Nirav Shahc657ef52016-07-26 14:22:38 +05304273
4274 if (NULL == vdev) {
4275 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4276 "%s: Invalid vdev_id %d", __func__, vdev_id);
4277 return;
4278 }
4279 vdev->mac_id = mac_id;
4280}
4281
Alok Kumar75355aa2018-03-19 17:32:58 +05304282/**
 * ol_txrx_get_tx_ack_stats() - get tx ack success count
4284 * @vdev_id: vdev_id
4285 *
4286 * Return: tx ack count
4287 */
4288static uint32_t ol_txrx_get_tx_ack_stats(uint8_t vdev_id)
4289{
4290 struct ol_txrx_vdev_t *vdev =
4291 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
4292 if (!vdev) {
4293 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4294 "%s: Invalid vdev_id %d", __func__, vdev_id);
4295 return 0;
4296 }
4297 return vdev->txrx_stats.txack_success;
4298}
4299
Leo Chang8e073612015-11-13 10:55:34 -08004300/**
 * ol_txrx_display_stats() - display OL TXRX stats
 * @soc: datapath soc handle
 * @value: module id for which stats need to be displayed
 * @verb_level: verbosity level for the stats display
 *
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004304 * Return: status
Nirav Shahda008342016-05-17 18:50:40 +05304305 */
Mohit Khannaca4173b2017-09-12 21:52:19 -07004306static QDF_STATUS
4307ol_txrx_display_stats(void *soc, uint16_t value,
4308 enum qdf_stats_verbosity_level verb_level)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004309{
4310 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004311 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004312
Anurag Chouhan6d760662016-02-20 16:05:43 +05304313 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004314 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304315 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304316 "%s: pdev is NULL", __func__);
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004317 return QDF_STATUS_E_NULL_VALUE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004318 }
4319
4320 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004321 case CDP_TXRX_PATH_STATS:
Mohit Khannaca4173b2017-09-12 21:52:19 -07004322 ol_txrx_stats_display(pdev, verb_level);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004323 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004324 case CDP_TXRX_TSO_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004325 ol_txrx_stats_display_tso(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004326 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004327 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07004328 ol_tx_dump_flow_pool_info((void *)pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004329 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004330 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304331 qdf_nbuf_tx_desc_count_display();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004332 break;
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -08004333 case CDP_WLAN_RX_BUF_DEBUG_STATS:
4334 htt_display_rx_buf_debug(pdev->htt_pdev);
4335 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304336#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004337 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304338 ol_tx_sched_cur_state_display(pdev);
4339 ol_tx_sched_stats_display(pdev);
4340 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004341 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304342 ol_tx_queue_log_display(pdev);
4343 break;
4344#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004345 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304346 ol_tx_dump_group_credit_stats(pdev);
4347 break;
4348#endif
4349
4350#ifdef DEBUG_HL_LOGGING
Nirav Shaheb017be2018-02-15 11:20:58 +05304351 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304352 htt_dump_bundle_stats(pdev->htt_pdev);
4353 break;
4354#endif
4355#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004356 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004357 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004358 break;
4359 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004360 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004361}
4362
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004363/**
4364 * ol_txrx_clear_stats() - Clear OL TXRX stats
4365 * @value: Module id for which stats needs to be cleared
4366 *
4367 * Return: None
4368 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004369static void ol_txrx_clear_stats(uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004370{
4371 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004372 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004373
Anurag Chouhan6d760662016-02-20 16:05:43 +05304374 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004375 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304376 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304377 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004378 return;
4379 }
4380
4381 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004382 case CDP_TXRX_PATH_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004383 ol_txrx_stats_clear(pdev);
4384 break;
Yun Park1027e8c2017-10-13 15:17:37 -07004385 case CDP_TXRX_TSO_STATS:
4386 ol_txrx_tso_stats_clear(pdev);
4387 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004388 case CDP_DUMP_TX_FLOW_POOL_INFO:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004389 ol_tx_clear_flow_pool_stats();
4390 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004391 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304392 qdf_nbuf_tx_desc_count_clear();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004393 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304394#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004395 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304396 ol_tx_sched_stats_clear(pdev);
4397 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004398 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304399 ol_tx_queue_log_clear(pdev);
4400 break;
4401#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004402 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304403 ol_tx_clear_group_credit_stats(pdev);
4404 break;
4405#endif
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004406 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304407 htt_clear_bundle_stats(pdev->htt_pdev);
4408 break;
4409#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004410 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004411 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004412 break;
4413 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004414
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004415}
4416
4417/**
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004418 * ol_txrx_drop_nbuf_list() - drop an nbuf list
 * @buf_list: buffer list to be dropped
4420 *
4421 * Return: int (number of bufs dropped)
4422 */
4423static inline int ol_txrx_drop_nbuf_list(qdf_nbuf_t buf_list)
4424{
4425 int num_dropped = 0;
4426 qdf_nbuf_t buf, next_buf;
4427 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
4428
4429 buf = buf_list;
4430 while (buf) {
Himanshu Agarwaldd2196a2017-07-31 11:38:14 +05304431 QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004432 next_buf = qdf_nbuf_queue_next(buf);
4433 if (pdev)
4434 TXRX_STATS_MSDU_INCR(pdev,
4435 rx.dropped_peer_invalid, buf);
4436 qdf_nbuf_free(buf);
4437 buf = next_buf;
4438 num_dropped++;
4439 }
4440 return num_dropped;
4441}
4442
4443/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004444 * ol_rx_data_cb() - data rx callback
 * @pdev: pointer to the txrx pdev
4446 * @buf_list: buffer list
Nirav Shah36a87bf2016-02-22 12:38:46 +05304447 * @staid: Station id
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004448 *
4449 * Return: None
4450 */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304451static void ol_rx_data_cb(struct ol_txrx_pdev_t *pdev,
4452 qdf_nbuf_t buf_list, uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004453{
Mohit Khanna0696eef2016-04-14 16:14:08 -07004454 void *osif_dev;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004455 uint8_t drop_count = 0;
Nirav Shahcbc6d722016-03-01 16:24:53 +05304456 qdf_nbuf_t buf, next_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304457 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004458 ol_txrx_rx_fp data_rx = NULL;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304459 struct ol_txrx_peer_t *peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004460
Jeff Johnsondac9e382017-09-24 10:36:08 -07004461 if (qdf_unlikely(!pdev))
Nirav Shah36a87bf2016-02-22 12:38:46 +05304462 goto free_buf;
4463
4464 /* Do not use peer directly. Derive peer from staid to
4465 * make sure that peer is valid.
4466 */
Jingxiang Ge3badb982018-01-02 17:39:01 +08004467 peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev,
4468 staid, PEER_DEBUG_ID_OL_RX_THREAD);
Nirav Shah36a87bf2016-02-22 12:38:46 +05304469 if (!peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004470 goto free_buf;
4471
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304472 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre50141c52016-04-07 13:15:29 -07004473 if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
4474 !peer->vdev->rx)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304475 qdf_spin_unlock_bh(&peer->peer_info_lock);
Jingxiang Ge9f297062018-01-24 13:31:31 +08004476 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004477 goto free_buf;
4478 }
Dhanashri Atre182b0272016-02-17 15:35:07 -08004479
4480 data_rx = peer->vdev->rx;
Mohit Khanna0696eef2016-04-14 16:14:08 -07004481 osif_dev = peer->vdev->osif_dev;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304482 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004483
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004484 qdf_spin_lock_bh(&peer->bufq_info.bufq_lock);
4485 if (!list_empty(&peer->bufq_info.cached_bufq)) {
4486 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004487 /* Flush the cached frames to HDD before passing new rx frame */
4488 ol_txrx_flush_rx_frames(peer, 0);
4489 } else
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004490 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004491
Jingxiang Ge3badb982018-01-02 17:39:01 +08004492 ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
4493
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004494 buf = buf_list;
4495 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304496 next_buf = qdf_nbuf_queue_next(buf);
4497 qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
Mohit Khanna0696eef2016-04-14 16:14:08 -07004498 ret = data_rx(osif_dev, buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304499 if (ret != QDF_STATUS_SUCCESS) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304500 ol_txrx_err("Frame Rx to HDD failed");
Nirav Shah6a4eee62016-04-25 10:15:04 +05304501 if (pdev)
4502 TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304503 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004504 }
4505 buf = next_buf;
4506 }
4507 return;
4508
4509free_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004510 drop_count = ol_txrx_drop_nbuf_list(buf_list);
	ol_txrx_warn("%s: Dropped frames %u", __func__, drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004512}
4513
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004514/* print for every 16th packet */
4515#define OL_TXRX_PRINT_RATE_LIMIT_THRESH 0x0f
4516struct ol_rx_cached_buf *cache_buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304517
4518/** helper function to drop packets
 * Note: caller must hold the cached bufq lock before invoking
4520 * this function. Also, it assumes that the pointers passed in
4521 * are valid (non-NULL)
4522 */
4523static inline void ol_txrx_drop_frames(
4524 struct ol_txrx_cached_bufq_t *bufqi,
4525 qdf_nbuf_t rx_buf_list)
4526{
4527 uint32_t dropped = ol_txrx_drop_nbuf_list(rx_buf_list);
Srinivas Girigowda4d65ebe2017-10-13 21:41:42 -07004528
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304529 bufqi->dropped += dropped;
4530 bufqi->qdepth_no_thresh += dropped;
4531
4532 if (bufqi->qdepth_no_thresh > bufqi->high_water_mark)
4533 bufqi->high_water_mark = bufqi->qdepth_no_thresh;
4534}
4535
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004536static QDF_STATUS ol_txrx_enqueue_rx_frames(
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304537 struct ol_txrx_peer_t *peer,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004538 struct ol_txrx_cached_bufq_t *bufqi,
4539 qdf_nbuf_t rx_buf_list)
4540{
4541 struct ol_rx_cached_buf *cache_buf;
4542 qdf_nbuf_t buf, next_buf;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004543 static uint32_t count;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004544
4545 if ((count++ & OL_TXRX_PRINT_RATE_LIMIT_THRESH) == 0)
4546 ol_txrx_info_high(
4547 "Data on the peer before it is registered bufq->curr %d bufq->drops %d",
4548 bufqi->curr, bufqi->dropped);
4549
4550 qdf_spin_lock_bh(&bufqi->bufq_lock);
4551 if (bufqi->curr >= bufqi->thresh) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304552 ol_txrx_drop_frames(bufqi, rx_buf_list);
4553 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4554 return QDF_STATUS_E_FAULT;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004555 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004556 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4557
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004558 buf = rx_buf_list;
4559 while (buf) {
4560 next_buf = qdf_nbuf_queue_next(buf);
4561 cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
4562 if (!cache_buf) {
4563 ol_txrx_err(
4564 "Failed to allocate buf to cache the rx frames");
4565 qdf_nbuf_free(buf);
4566 } else {
4567 /* Add NULL terminator */
4568 qdf_nbuf_set_next(buf, NULL);
4569 cache_buf->buf = buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304570 if (peer && peer->valid) {
4571 qdf_spin_lock_bh(&bufqi->bufq_lock);
4572 list_add_tail(&cache_buf->list,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004573 &bufqi->cached_bufq);
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304574 bufqi->curr++;
4575 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4576 } else {
4577 qdf_mem_free(cache_buf);
4578 rx_buf_list = buf;
4579 qdf_nbuf_set_next(rx_buf_list, next_buf);
4580 qdf_spin_lock_bh(&bufqi->bufq_lock);
4581 ol_txrx_drop_frames(bufqi, rx_buf_list);
4582 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4583 return QDF_STATUS_E_FAULT;
4584 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004585 }
4586 buf = next_buf;
4587 }
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304588 return QDF_STATUS_SUCCESS;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004589}
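
/*
 * Frames that arrive before a peer's rx callback is registered are parked on
 * peer->bufq_info.cached_bufq by the function above, bounded by bufqi->thresh
 * (excess frames are dropped and accounted via ol_txrx_drop_frames()).  The
 * queue is drained through ol_txrx_flush_rx_frames() once the station is
 * registered (see ol_txrx_register_peer() below).
 */
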
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004590/**
4591 * ol_rx_data_process() - process rx frame
4592 * @peer: peer
4593 * @rx_buf_list: rx buffer list
4594 *
4595 * Return: None
4596 */
4597void ol_rx_data_process(struct ol_txrx_peer_t *peer,
Nirav Shahcbc6d722016-03-01 16:24:53 +05304598 qdf_nbuf_t rx_buf_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004599{
Yun Parkeaea8632017-04-09 09:53:45 -07004600 /*
4601 * Firmware data path active response will use shim RX thread
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004602 * T2H MSG running on SIRQ context,
Yun Parkeaea8632017-04-09 09:53:45 -07004603 * IPA kernel module API should not be called on SIRQ CTXT
4604 */
Dhanashri Atre182b0272016-02-17 15:35:07 -08004605 ol_txrx_rx_fp data_rx = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304606 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004607
4608 if ((!peer) || (!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304609 ol_txrx_err("peer/pdev is NULL");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004610 goto drop_rx_buf;
4611 }
4612
Dhanashri Atre182b0272016-02-17 15:35:07 -08004613 qdf_assert(peer->vdev);
4614
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304615 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004616 if (peer->state >= OL_TXRX_PEER_STATE_CONN)
Dhanashri Atre182b0272016-02-17 15:35:07 -08004617 data_rx = peer->vdev->rx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304618 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004619
4620 /*
4621 * If there is a data frame from peer before the peer is
4622 * registered for data service, enqueue them on to pending queue
4623 * which will be flushed to HDD once that station is registered.
4624 */
4625 if (!data_rx) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304626 if (ol_txrx_enqueue_rx_frames(peer, &peer->bufq_info,
4627 rx_buf_list)
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004628 != QDF_STATUS_SUCCESS)
Poddar, Siddarth07eebf32017-04-19 12:40:26 +05304629 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4630 "%s: failed to enqueue rx frm to cached_bufq",
4631 __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004632 } else {
4633#ifdef QCA_CONFIG_SMP
4634 /*
4635 * If the kernel is SMP, schedule rx thread to
4636 * better use multicores.
4637 */
4638 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
Nirav Shah36a87bf2016-02-22 12:38:46 +05304639 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004640 } else {
4641 p_cds_sched_context sched_ctx =
4642 get_cds_sched_ctxt();
4643 struct cds_ol_rx_pkt *pkt;
4644
4645 if (unlikely(!sched_ctx))
4646 goto drop_rx_buf;
4647
4648 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
Alok Kumar3a6327d2018-08-06 17:28:25 +05304649 if (!pkt)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004650 goto drop_rx_buf;
Alok Kumar3a6327d2018-08-06 17:28:25 +05304651
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004652 pkt->callback = (cds_ol_rx_thread_cb)
4653 ol_rx_data_cb;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304654 pkt->context = (void *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004655 pkt->Rxpkt = (void *)rx_buf_list;
4656 pkt->staId = peer->local_id;
4657 cds_indicate_rxpkt(sched_ctx, pkt);
4658 }
4659#else /* QCA_CONFIG_SMP */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304660 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004661#endif /* QCA_CONFIG_SMP */
4662 }
4663
4664 return;
4665
4666drop_rx_buf:
Alok Kumar3a6327d2018-08-06 17:28:25 +05304667 ol_txrx_drop_nbuf_list(rx_buf_list);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004668}
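
/*
 * Dispatch note for the function above: with QCA_CONFIG_SMP and the rx thread
 * enabled in the configuration, the frame list is handed to the cds rx thread
 * as a cds_ol_rx_pkt whose callback is ol_rx_data_cb(); otherwise
 * ol_rx_data_cb() is invoked inline in the current (softirq) context.
 */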
4669
4670/**
4671 * ol_txrx_register_peer() - register peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004672 * @sta_desc: sta descriptor
4673 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05304674 * Return: QDF Status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004675 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004676static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004677{
4678 struct ol_txrx_peer_t *peer;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304679 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004680 union ol_txrx_peer_update_param_t param;
4681 struct privacy_exemption privacy_filter;
4682
4683 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304684 ol_txrx_err("Pdev is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304685 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004686 }
4687
4688 if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304689 ol_txrx_err("Invalid sta id :%d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004690 sta_desc->sta_id);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304691 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004692 }
4693
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004694 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
4695 sta_desc->sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004696 if (!peer)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304697 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004698
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304699 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004700 peer->state = OL_TXRX_PEER_STATE_CONN;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304701 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004702
4703 param.qos_capable = sta_desc->is_qos_enabled;
4704 ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
4705 ol_txrx_peer_update_qos_capable);
4706
4707 if (sta_desc->is_wapi_supported) {
4708 /*Privacy filter to accept unencrypted WAI frames */
4709 privacy_filter.ether_type = ETHERTYPE_WAI;
4710 privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
4711 privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
4712 ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
4713 }
4714
4715 ol_txrx_flush_rx_frames(peer, 0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304716 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004717}
4718
4719/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004720 * ol_txrx_register_ocb_peer - Function to register the OCB peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004721 * @mac_addr: MAC address of the self peer
4722 * @peer_id: Pointer to the peer ID
4723 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304724 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004725 */
Jeff Johnson382bce02017-09-01 14:21:07 -07004726static QDF_STATUS ol_txrx_register_ocb_peer(uint8_t *mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004727 uint8_t *peer_id)
4728{
4729 ol_txrx_pdev_handle pdev;
4730 ol_txrx_peer_handle peer;
4731
Anurag Chouhan6d760662016-02-20 16:05:43 +05304732 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004733 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304734 ol_txrx_err("%s: Unable to find pdev!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004735 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304736 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004737 }
4738
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004739 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
4740 mac_addr, peer_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004741 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304742 ol_txrx_err("%s: Unable to find OCB peer!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004743 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304744 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004745 }
4746
4747 ol_txrx_set_ocb_peer(pdev, peer);
4748
4749 /* Set peer state to connected */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004750 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004751 OL_TXRX_PEER_STATE_AUTH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004752
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304753 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004754}
4755
4756/**
4757 * ol_txrx_set_ocb_peer - Function to store the OCB peer
4758 * @pdev: Handle to the HTT instance
4759 * @peer: Pointer to the peer
4760 */
4761void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
4762 struct ol_txrx_peer_t *peer)
4763{
4764 if (pdev == NULL)
4765 return;
4766
4767 pdev->ocb_peer = peer;
4768 pdev->ocb_peer_valid = (NULL != peer);
4769}
4770
4771/**
4772 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
4773 * @pdev: Handle to the HTT instance
4774 * @peer: Pointer to the returned peer
4775 *
4776 * Return: true if the peer is valid, false if not
4777 */
4778bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
4779 struct ol_txrx_peer_t **peer)
4780{
4781 int rc;
4782
4783 if ((pdev == NULL) || (peer == NULL)) {
4784 rc = false;
4785 goto exit;
4786 }
4787
4788 if (pdev->ocb_peer_valid) {
4789 *peer = pdev->ocb_peer;
4790 rc = true;
4791 } else {
4792 rc = false;
4793 }
4794
4795exit:
4796 return rc;
4797}

#ifdef RECEIVE_OFFLOAD
/**
 * ol_txrx_offld_flush_handler() - offld flush handler
 * @context: dev handle
 * @rxpkt: rx data
 * @staid: station id
 *
 * This function handles an offld flush indication.
 * If the rx thread is enabled, it is invoked by the rx thread;
 * otherwise it is called in the tasklet context.
 *
 * Return: none
 */
static void ol_txrx_offld_flush_handler(void *context,
					void *rxpkt,
					uint16_t staid)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (qdf_unlikely(!pdev)) {
		ol_txrx_err("Invalid context");
		qdf_assert(0);
		return;
	}

	if (pdev->offld_flush_cb)
		pdev->offld_flush_cb(context);
	else
		ol_txrx_err("offld_flush_cb NULL");
}

/**
 * ol_txrx_offld_flush() - offld flush callback
 * @data: opaque data pointer
 *
 * This is the callback registered with CE to trigger
 * an offld flush.
 *
 * Return: none
 */
static void ol_txrx_offld_flush(void *data)
{
	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
	struct cds_ol_rx_pkt *pkt;
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (qdf_unlikely(!sched_ctx))
		return;

	if (qdf_unlikely(!pdev)) {
		ol_txrx_err("TXRX module context is NULL");
		return;
	}

	if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
		ol_txrx_offld_flush_handler(data, NULL, 0);
	} else {
		pkt = cds_alloc_ol_rx_pkt(sched_ctx);
		if (qdf_unlikely(!pkt))
			return;

		pkt->callback = ol_txrx_offld_flush_handler;
		pkt->context = data;
		pkt->Rxpkt = NULL;
		pkt->staId = 0;
		cds_indicate_rxpkt(sched_ctx, pkt);
	}
}
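
/*
 * Note on the dispatch above: with the rx thread disabled, the flush handler
 * runs inline in the calling (tasklet) context; with it enabled, a
 * cds_ol_rx_pkt is queued via cds_indicate_rxpkt() and the rx thread later
 * invokes ol_txrx_offld_flush_handler() with the saved context.
 */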

/**
 * ol_register_offld_flush_cb() - register the offld flush callback
 * @offld_flush_cb: flush callback function
 *
 * Store the offld flush callback provided and in turn
 * register OL's offld flush handler with CE.
 *
 * Return: none
 */
static void ol_register_offld_flush_cb(void (offld_flush_cb)(void *))
{
	struct hif_opaque_softc *hif_device;
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (pdev == NULL) {
		ol_txrx_err("pdev NULL!");
		TXRX_ASSERT2(0);
		goto out;
	}
	if (pdev->offld_flush_cb != NULL) {
		ol_txrx_info("offld already initialised");
		if (pdev->offld_flush_cb != offld_flush_cb) {
			ol_txrx_err(
				"offld_flush_cb differs from previously registered callback");
			TXRX_ASSERT2(0);
			goto out;
		}
		goto out;
	}
	pdev->offld_flush_cb = offld_flush_cb;
	hif_device = cds_get_context(QDF_MODULE_ID_HIF);

	if (qdf_unlikely(hif_device == NULL)) {
		ol_txrx_err("hif_device NULL!");
		qdf_assert(0);
		goto out;
	}

	hif_offld_flush_cb_register(hif_device, ol_txrx_offld_flush);

out:
	return;
}

/**
 * ol_deregister_offld_flush_cb() - deregister the offld flush callback
 *
 * Remove the offld flush callback provided and in turn
 * deregister OL's offld flush handler with CE.
 *
 * Return: none
 */
static void ol_deregister_offld_flush_cb(void)
{
	struct hif_opaque_softc *hif_device;
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (pdev == NULL) {
		ol_txrx_err("pdev NULL!");
		return;
	}
	hif_device = cds_get_context(QDF_MODULE_ID_HIF);

	if (qdf_unlikely(hif_device == NULL)) {
		ol_txrx_err("hif_device NULL!");
		qdf_assert(0);
		return;
	}

	hif_offld_flush_cb_deregister(hif_device);

	pdev->offld_flush_cb = NULL;
}
#endif /* RECEIVE_OFFLOAD */

/**
 * ol_register_data_stall_detect_cb() - register data stall callback
 * @data_stall_detect_callback: data stall callback function
 *
 * Return: QDF_STATUS Enumeration
 */
static QDF_STATUS ol_register_data_stall_detect_cb(
			data_stall_detect_cb data_stall_detect_callback)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (pdev == NULL) {
		ol_txrx_err("%s: pdev NULL!", __func__);
		return QDF_STATUS_E_INVAL;
	}
	pdev->data_stall_detect_callback = data_stall_detect_callback;
	return QDF_STATUS_SUCCESS;
}

/**
 * ol_deregister_data_stall_detect_cb() - de-register data stall callback
 * @data_stall_detect_callback: data stall callback function
 *
 * Return: QDF_STATUS Enumeration
 */
static QDF_STATUS ol_deregister_data_stall_detect_cb(
			data_stall_detect_cb data_stall_detect_callback)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (pdev == NULL) {
		ol_txrx_err("%s: pdev NULL!", __func__);
		return QDF_STATUS_E_INVAL;
	}
	pdev->data_stall_detect_callback = NULL;
	return QDF_STATUS_SUCCESS;
}

/**
 * ol_txrx_post_data_stall_event() - post data stall event
 * @indicator: Module triggering data stall
 * @data_stall_type: data stall event type
 * @pdev_id: pdev id
 * @vdev_id_bitmap: vdev id bitmap
 * @recovery_type: data stall recovery type
 *
 * Return: None
 */
static void ol_txrx_post_data_stall_event(
				enum data_stall_log_event_indicator indicator,
				enum data_stall_log_event_type data_stall_type,
				uint32_t pdev_id, uint32_t vdev_id_bitmap,
				enum data_stall_log_recovery_type recovery_type)
{
	struct scheduler_msg msg = {0};
	QDF_STATUS status;
	struct data_stall_event_info *data_stall_info;
	ol_txrx_pdev_handle pdev;

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: pdev is NULL.", __func__);
		return;
	}
	data_stall_info = qdf_mem_malloc(sizeof(*data_stall_info));
	if (!data_stall_info) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: failed to allocate data_stall_info", __func__);
		return;
	}
	data_stall_info->indicator = indicator;
	data_stall_info->data_stall_type = data_stall_type;
	data_stall_info->vdev_id_bitmap = vdev_id_bitmap;
	data_stall_info->pdev_id = pdev_id;
	data_stall_info->recovery_type = recovery_type;

	if (data_stall_info->data_stall_type ==
				DATA_STALL_LOG_FW_RX_REFILL_FAILED)
		htt_log_rx_ring_info(pdev->htt_pdev);

	sys_build_message_header(SYS_MSG_ID_DATA_STALL_MSG, &msg);
	/* Save callback and data */
	msg.callback = pdev->data_stall_detect_callback;
	msg.bodyptr = data_stall_info;
	msg.bodyval = 0;

	status = scheduler_post_message(QDF_MODULE_ID_TXRX,
					QDF_MODULE_ID_HDD,
					QDF_MODULE_ID_SYS, &msg);

	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: failed to post data stall msg to SYS", __func__);
		qdf_mem_free(data_stall_info);
	}
}
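
/*
 * Illustrative call (a sketch, not an existing call site in this file):
 * a detection module posts a stall event with its own identifiers, e.g.
 *
 *	ol_txrx_post_data_stall_event(indicator,
 *				      DATA_STALL_LOG_FW_RX_REFILL_FAILED,
 *				      pdev_id, vdev_id_bitmap, recovery_type);
 *
 * where indicator, pdev_id, vdev_id_bitmap and recovery_type are supplied by
 * the caller. The event is delivered through the SYS message queue to the
 * callback registered with ol_register_data_stall_detect_cb().
 */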

void
ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
{
	qdf_print("%s: Pkt: VA 0x%pK PA 0x%llx len %d\n", __func__,
		  qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
	print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
		       qdf_nbuf_data(nbuf), len, true);
}

/**
 * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
 * @vdev_id: vdev_id
 *
 * Return: vdev handle
 *         NULL if not found.
 */
struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	ol_txrx_vdev_handle vdev = NULL;

	if (qdf_unlikely(!pdev))
		return NULL;

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->vdev_id == vdev_id)
			break;
	}

	return (struct cdp_vdev *)vdev;
}
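
/*
 * Typical lookup pattern for the function above (illustrative only; the
 * surrounding caller is hypothetical):
 *
 *	struct cdp_vdev *vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
 *
 *	if (!vdev)
 *		return QDF_STATUS_E_INVAL;
 *
 * The lookup is a linear walk of pdev->vdev_list, so callers on hot paths
 * should cache the returned handle rather than resolve the id repeatedly.
 */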

/**
 * ol_txrx_set_wisa_mode() - set wisa mode
 * @pvdev: vdev handle
 * @enable: enable flag
 *
 * Return: QDF STATUS
 */
static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_vdev *pvdev, bool enable)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	if (!vdev)
		return QDF_STATUS_E_INVAL;

	vdev->is_wisa_mode_enable = enable;
	return QDF_STATUS_SUCCESS;
}

/**
 * ol_txrx_get_vdev_id() - get interface id from interface context
 * @pvdev: vdev handle
 *
 * Return: virtual interface id
 */
static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	return vdev->vdev_id;
}

/**
 * ol_txrx_soc_attach_target() - attach soc target
 * @soc: soc handle
 *
 * MCL legacy OL does nothing here.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
{
	/* MCL legacy OL does nothing here */
	return QDF_STATUS_SUCCESS;
}

/**
 * ol_txrx_soc_detach() - detach soc target
 * @soc: soc handle
 *
 * Free the soc allocated by ol_txrx_soc_attach().
 *
 * Return: none
 */
static void ol_txrx_soc_detach(void *soc)
{
	qdf_mem_free(soc);
}

/**
 * ol_txrx_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Return: none
 */
#ifdef REMOVE_PKT_LOG
static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
}
#else
static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	htt_pkt_log_init((struct cdp_pdev *)pdev, scn);
	pktlog_htc_attach();
}
#endif

/* OL wrapper functions for CDP abstraction */
/**
 * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
 * @peer: peer handle
 * @drop: rx packets drop or deliver
 *
 * Return: none
 */
static void ol_txrx_wrapper_flush_rx_frames(void *peer, bool drop)
{
	ol_txrx_flush_rx_frames((ol_txrx_peer_handle)peer, drop);
}

/**
 * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
 * @ppdev: pdev handle
 * @vdev_id: interface id
 *
 * Return: virtual interface instance
 */
static
struct cdp_vdev *ol_txrx_wrapper_get_vdev_from_vdev_id(struct cdp_pdev *ppdev,
							uint8_t vdev_id)
{
	return ol_txrx_get_vdev_from_vdev_id(vdev_id);
}

/**
 * ol_txrx_wrapper_register_peer() - register peer
 * @pdev: pdev handle
 * @sta_desc: peer description
 *
 * Return: QDF STATUS
 */
static QDF_STATUS ol_txrx_wrapper_register_peer(struct cdp_pdev *pdev,
		struct ol_txrx_desc_type *sta_desc)
{
	return ol_txrx_register_peer(sta_desc);
}

/**
 * ol_txrx_wrapper_peer_find_by_local_id() - Find a txrx peer handle
 * @pdev: the data physical device object
 * @local_peer_id: the ID txrx assigned locally to the peer in question
 *
 * The control SW typically uses the txrx peer handle to refer to the peer.
 * In unusual circumstances, if it is infeasible for the control SW to
 * maintain the txrx peer handle but it can maintain a small integer local
 * peer ID, this function allows the peer handle to be retrieved, based on
 * the local peer ID.
 *
 * Return: handle to the txrx peer object
 */
static void *
ol_txrx_wrapper_peer_find_by_local_id(struct cdp_pdev *pdev,
				      uint8_t local_peer_id)
{
	return (void *)ol_txrx_peer_find_by_local_id(pdev, local_peer_id);
}

/**
 * ol_txrx_wrapper_cfg_is_high_latency() - device is high or low latency device
 * @cfg_pdev: cfg handle
 *
 * Return: 1 high latency bus
 *         0 low latency bus
 */
static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
{
	return ol_cfg_is_high_latency(cfg_pdev);
}

/**
 * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
 * @pdev: pdev handle
 * @peer_mac: mac address of the peer whose state has changed
 * @state: the new state of the peer
 *
 * Specify the peer's authentication state (none, connected, authenticated)
 * to allow the data SW to determine whether to filter out invalid data frames.
 * (In the "connected" state, where security is enabled, but authentication
 * has not completed, tx and rx data frames other than EAPOL or WAPI should
 * be discarded.)
 * This function is only relevant for systems in which the tx and rx filtering
 * are done in the host rather than in the target.
 *
 * Return: QDF Status
 */
static QDF_STATUS ol_txrx_wrapper_peer_state_update(struct cdp_pdev *pdev,
		uint8_t *peer_mac, enum ol_txrx_peer_state state)
{
	return ol_txrx_peer_state_update(pdev, peer_mac, state);
}

/**
 * ol_txrx_wrapper_find_peer_by_addr() - find peer instance by address
 * @pdev: pdev handle
 * @peer_addr: peer address to find
 * @peer_id: peer id
 *
 * Return: peer instance pointer
 */
static void *ol_txrx_wrapper_find_peer_by_addr(struct cdp_pdev *pdev,
		uint8_t *peer_addr, uint8_t *peer_id)
{
	return ol_txrx_find_peer_by_addr(pdev, peer_addr, peer_id);
}

/**
 * ol_txrx_wrapper_peer_get_ref_by_addr() - get peer reference by address
 * @pdev: pdev handle
 * @peer_addr: peer address we want to find
 * @peer_id: peer id
 * @debug_id: peer debug id for tracking
 *
 * Return: peer instance pointer
 */
static void *
ol_txrx_wrapper_peer_get_ref_by_addr(struct cdp_pdev *pdev,
				     u8 *peer_addr, uint8_t *peer_id,
				     enum peer_debug_id_type debug_id)
{
	return ol_txrx_peer_get_ref_by_addr((ol_txrx_pdev_handle)pdev,
					    peer_addr, peer_id, debug_id);
}

/**
 * ol_txrx_wrapper_peer_release_ref() - release peer reference
 * @peer: peer handle
 * @debug_id: peer debug id for tracking
 *
 * Release peer ref acquired by peer get ref api.
 *
 * Return: void
 */
static void ol_txrx_wrapper_peer_release_ref(void *peer,
					     enum peer_debug_id_type debug_id)
{
	ol_txrx_peer_release_ref(peer, debug_id);
}
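
/*
 * The two wrappers above are meant to be used as a pair. A sketch of the
 * expected pattern (the caller and the mac/pdev variables are hypothetical,
 * and the debug id shown is just one of the peer_debug_id_type values):
 *
 *	uint8_t peer_id;
 *	void *peer;
 *
 *	peer = ol_txrx_wrapper_peer_get_ref_by_addr(pdev, mac, &peer_id,
 *						    PEER_DEBUG_ID_OL_INTERNAL);
 *	if (peer) {
 *		... use the peer ...
 *		ol_txrx_wrapper_peer_release_ref(peer,
 *						 PEER_DEBUG_ID_OL_INTERNAL);
 *	}
 *
 * Every successful get_ref should be balanced by a release_ref with the same
 * debug id; otherwise the peer object cannot be freed.
 */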

/**
 * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
 * @cfg_pdev: cfg context
 * @cfg_param: cfg parameters
 *
 * Return: none
 */
static void
ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
					    void *cfg_param)
{
	return ol_tx_set_flow_control_parameters(
		cfg_pdev,
		(struct txrx_pdev_cfg_param_t *)cfg_param);
}

#ifdef WDI_EVENT_ENABLE
void *ol_get_pldev(struct cdp_pdev *txrx_pdev)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)txrx_pdev;

	if (pdev != NULL)
		return pdev->pl_dev;

	return NULL;
}
#endif

static struct cdp_cmn_ops ol_ops_cmn = {
	.txrx_soc_attach_target = ol_txrx_soc_attach_target,
	.txrx_vdev_attach = ol_txrx_vdev_attach,
	.txrx_vdev_detach = ol_txrx_vdev_detach,
	.txrx_pdev_attach = ol_txrx_pdev_attach,
	.txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
	.txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
	.txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
	.txrx_pdev_detach = ol_txrx_pdev_detach,
	.txrx_peer_create = ol_txrx_peer_attach,
	.txrx_peer_setup = NULL,
	.txrx_peer_teardown = NULL,
	.txrx_peer_delete = ol_txrx_peer_detach,
	.txrx_vdev_register = ol_txrx_vdev_register,
	.txrx_soc_detach = ol_txrx_soc_detach,
	.txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
	.txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
	.txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
	.txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
	.txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
	.txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
	.txrx_get_tx_pending = ol_txrx_get_tx_pending,
	.flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
	.txrx_fw_stats_get = ol_txrx_fw_stats_get,
	.display_stats = ol_txrx_display_stats,
	/* TODO: Add other functions */
};

static struct cdp_misc_ops ol_ops_misc = {
	.set_ibss_vdev_heart_beat_timer =
		ol_txrx_set_ibss_vdev_heart_beat_timer,
#ifdef CONFIG_HL_SUPPORT
	.set_wmm_param = ol_txrx_set_wmm_param,
#endif /* CONFIG_HL_SUPPORT */
	.bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
	.bad_peer_txctl_update_threshold =
		ol_txrx_bad_peer_txctl_update_threshold,
	.hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
	.tx_non_std = ol_tx_non_std,
	.get_vdev_id = ol_txrx_get_vdev_id,
	.get_tx_ack_stats = ol_txrx_get_tx_ack_stats,
	.set_wisa_mode = ol_txrx_set_wisa_mode,
	.txrx_data_stall_cb_register = ol_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = ol_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = ol_txrx_post_data_stall_event,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = ol_txrx_runtime_suspend,
	.runtime_resume = ol_txrx_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.get_opmode = ol_txrx_get_opmode,
	.mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
	.update_mac_id = ol_txrx_update_mac_id,
	.flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
	.get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
	.pkt_log_init = htt_pkt_log_init,
	.pkt_log_con_service = ol_txrx_pkt_log_con_service
};

static struct cdp_flowctl_ops ol_ops_flowctl = {
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.register_pause_cb = ol_txrx_register_pause_cb,
	.set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
	.dump_flow_pool_info = ol_tx_dump_flow_pool_info,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};

#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
	.register_tx_flow_control = ol_txrx_register_tx_flow_control,
	.deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
	.flow_control_cb = ol_txrx_flow_control_cb,
	.get_tx_resource = ol_txrx_get_tx_resource,
	.ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
	.vdev_flush = ol_txrx_vdev_flush,
	.vdev_pause = ol_txrx_vdev_pause,
	.vdev_unpause = ol_txrx_vdev_unpause
}; /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
#elif defined(QCA_HL_NETDEV_FLOW_CONTROL)
static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
	.register_tx_flow_control = ol_txrx_register_hl_flow_control,
	.vdev_flush = ol_txrx_vdev_flush,
	.vdev_pause = ol_txrx_vdev_pause,
	.vdev_unpause = ol_txrx_vdev_unpause,
	.set_vdev_os_queue_status = ol_txrx_set_vdev_os_queue_status,
	.set_vdev_tx_desc_limit = ol_txrx_set_vdev_tx_desc_limit
};
#else /* QCA_HL_NETDEV_FLOW_CONTROL */
static struct cdp_lflowctl_ops ol_ops_l_flowctl = { };
#endif

#ifdef IPA_OFFLOAD
static struct cdp_ipa_ops ol_ops_ipa = {
	.ipa_get_resource = ol_txrx_ipa_uc_get_resource,
	.ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
	.ipa_set_active = ol_txrx_ipa_uc_set_active,
	.ipa_op_response = ol_txrx_ipa_uc_op_response,
	.ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
	.ipa_get_stat = ol_txrx_ipa_uc_get_stat,
	.ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
	.ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base,
	.ipa_enable_autonomy = ol_txrx_ipa_enable_autonomy,
	.ipa_disable_autonomy = ol_txrx_ipa_disable_autonomy,
	.ipa_setup = ol_txrx_ipa_setup,
	.ipa_cleanup = ol_txrx_ipa_cleanup,
	.ipa_setup_iface = ol_txrx_ipa_setup_iface,
	.ipa_cleanup_iface = ol_txrx_ipa_cleanup_iface,
	.ipa_enable_pipes = ol_txrx_ipa_enable_pipes,
	.ipa_disable_pipes = ol_txrx_ipa_disable_pipes,
	.ipa_set_perf_level = ol_txrx_ipa_set_perf_level,
#ifdef FEATURE_METERING
	.ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
	.ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota
#endif
};
#endif

#ifdef RECEIVE_OFFLOAD
static struct cdp_rx_offld_ops ol_rx_offld_ops = {
	.register_rx_offld_flush_cb = ol_register_offld_flush_cb,
	.deregister_rx_offld_flush_cb = ol_deregister_offld_flush_cb
};
#endif

static struct cdp_bus_ops ol_ops_bus = {
	.bus_suspend = ol_txrx_bus_suspend,
	.bus_resume = ol_txrx_bus_resume
};

#ifdef WLAN_FEATURE_DSRC
static struct cdp_ocb_ops ol_ops_ocb = {
	.set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
	.get_ocb_chan_info = ol_txrx_get_ocb_chan_info
};
#endif

static struct cdp_throttle_ops ol_ops_throttle = {
#ifdef QCA_SUPPORT_TX_THROTTLE
	.throttle_init_period = ol_tx_throttle_init_period,
	.throttle_set_level = ol_tx_throttle_set_level
#endif /* QCA_SUPPORT_TX_THROTTLE */
};

static struct cdp_mob_stats_ops ol_ops_mob_stats = {
	.clear_stats = ol_txrx_clear_stats,
	.stats = ol_txrx_stats
};

static struct cdp_cfg_ops ol_ops_cfg = {
	.set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
	.set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
	.cfg_attach = ol_pdev_cfg_attach,
	.vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
	.is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
	.tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
	.is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
	.set_flow_control_parameters =
		ol_txrx_wrapper_set_flow_control_parameters,
	.set_flow_steering = ol_set_cfg_flow_steering,
	.set_ptp_rx_opt_enabled = ol_set_cfg_ptp_rx_opt_enabled,
};

static struct cdp_peer_ops ol_ops_peer = {
	.register_peer = ol_txrx_wrapper_register_peer,
	.clear_peer = ol_txrx_clear_peer,
	.peer_get_ref_by_addr = ol_txrx_wrapper_peer_get_ref_by_addr,
	.peer_release_ref = ol_txrx_wrapper_peer_release_ref,
	.find_peer_by_addr = ol_txrx_wrapper_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = ol_txrx_find_peer_by_addr_and_vdev,
	.local_peer_id = ol_txrx_local_peer_id,
	.peer_find_by_local_id = ol_txrx_wrapper_peer_find_by_local_id,
	.peer_state_update = ol_txrx_wrapper_peer_state_update,
	.get_vdevid = ol_txrx_get_vdevid,
	.get_vdev_by_sta_id = ol_txrx_get_vdev_by_sta_id,
	.register_ocb_peer = ol_txrx_register_ocb_peer,
	.peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
	.get_peer_state = ol_txrx_get_peer_state,
	.get_vdev_for_peer = ol_txrx_get_vdev_for_peer,
	.update_ibss_add_peer_num_of_vdev =
		ol_txrx_update_ibss_add_peer_num_of_vdev,
	.remove_peers_for_vdev = ol_txrx_remove_peers_for_vdev,
	.remove_peers_for_vdev_no_lock = ol_txrx_remove_peers_for_vdev_no_lock,
#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
	.copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
	.add_last_real_peer = ol_txrx_add_last_real_peer,
	.is_vdev_restore_last_peer = is_vdev_restore_last_peer,
	.update_last_real_peer = ol_txrx_update_last_real_peer,
#endif /* CONFIG_HL_SUPPORT */
	.peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
};

static struct cdp_tx_delay_ops ol_ops_delay = {
#ifdef QCA_COMPUTE_TX_DELAY
	.tx_delay = ol_tx_delay,
	.tx_delay_hist = ol_tx_delay_hist,
	.tx_packet_count = ol_tx_packet_count,
	.tx_set_compute_interval = ol_tx_set_compute_interval
#endif /* QCA_COMPUTE_TX_DELAY */
};

static struct cdp_pmf_ops ol_ops_pmf = {
	.get_pn_info = ol_txrx_get_pn_info
};

static struct cdp_ctrl_ops ol_ops_ctrl = {
	.txrx_get_pldev = ol_get_pldev,
	.txrx_wdi_event_sub = wdi_event_sub,
	.txrx_wdi_event_unsub = wdi_event_unsub,
};

/* WIN platform specific structures */
static struct cdp_me_ops ol_ops_me = {
	/* EMPTY FOR MCL */
};

static struct cdp_mon_ops ol_ops_mon = {
	/* EMPTY FOR MCL */
};

static struct cdp_host_stats_ops ol_ops_host_stats = {
	/* EMPTY FOR MCL */
};

static struct cdp_wds_ops ol_ops_wds = {
	/* EMPTY FOR MCL */
};

static struct cdp_raw_ops ol_ops_raw = {
	/* EMPTY FOR MCL */
};

static struct cdp_ops ol_txrx_ops = {
	.cmn_drv_ops = &ol_ops_cmn,
	.ctrl_ops = &ol_ops_ctrl,
	.me_ops = &ol_ops_me,
	.mon_ops = &ol_ops_mon,
	.host_stats_ops = &ol_ops_host_stats,
	.wds_ops = &ol_ops_wds,
	.raw_ops = &ol_ops_raw,
	.misc_ops = &ol_ops_misc,
	.cfg_ops = &ol_ops_cfg,
	.flowctl_ops = &ol_ops_flowctl,
	.l_flowctl_ops = &ol_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &ol_ops_ipa,
#endif
#ifdef RECEIVE_OFFLOAD
	.rx_offld_ops = &ol_rx_offld_ops,
#endif
	.bus_ops = &ol_ops_bus,
#ifdef WLAN_FEATURE_DSRC
	.ocb_ops = &ol_ops_ocb,
#endif
	.peer_ops = &ol_ops_peer,
	.throttle_ops = &ol_ops_throttle,
	.mob_stats_ops = &ol_ops_mob_stats,
	.delay_ops = &ol_ops_delay,
	.pmf_ops = &ol_ops_pmf
};

/*
 * Local prototype added to temporarily address warning caused by
 * -Wmissing-prototypes. A more correct solution, namely to expose
 * a prototype in an appropriate header file, will come later.
 */
struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
				     struct ol_if_ops *dp_ol_if_ops);
struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
				     struct ol_if_ops *dp_ol_if_ops)
{
	struct cdp_soc_t *soc = qdf_mem_malloc(sizeof(struct cdp_soc_t));

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: OL SOC memory allocation failed\n", __func__);
		return NULL;
	}

	soc->ops = &ol_txrx_ops;
	return soc;
}
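
/*
 * Usage sketch for the attach path above (illustrative; the init-time caller
 * and variable names are assumptions, not code from this file):
 *
 *	struct cdp_soc_t *soc = ol_txrx_soc_attach(scn_handle, dp_ol_if_ops);
 *
 *	if (soc)
 *		soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
 *
 * The returned handle is later released through
 * soc->ops->cmn_drv_ops->txrx_soc_detach(soc), which frees the allocation
 * made here.
 */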