Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Jeff Johnsonb9b49342016-12-19 16:46:23 -08002 * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/*=== includes ===*/
29/* header files for OS primitives */
30#include <osdep.h> /* uint32_t, etc. */
Anurag Chouhan600c3a02016-03-01 10:33:54 +053031#include <qdf_mem.h> /* qdf_mem_malloc,free */
Anurag Chouhan6d760662016-02-20 16:05:43 +053032#include <qdf_types.h> /* qdf_device_t, qdf_print */
Nirav Shahcbc6d722016-03-01 16:24:53 +053033#include <qdf_lock.h> /* qdf_spinlock */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +053034#include <qdf_atomic.h> /* qdf_atomic_read */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080035
Poddar, Siddarth27b1a602016-04-29 11:01:33 +053036#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080037/* Required for WLAN_FEATURE_FASTPATH */
38#include <ce_api.h>
Poddar, Siddarth27b1a602016-04-29 11:01:33 +053039#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080040/* header files for utilities */
41#include <cds_queue.h> /* TAILQ */
42
43/* header files for configuration API */
44#include <ol_cfg.h> /* ol_cfg_is_high_latency */
45#include <ol_if_athvar.h>
46
47/* header files for HTT API */
48#include <ol_htt_api.h>
49#include <ol_htt_tx_api.h>
50
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080051/* header files for our own APIs */
52#include <ol_txrx_api.h>
53#include <ol_txrx_dbg.h>
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -070054#include <cdp_txrx_ocb.h>
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -070055#include <ol_txrx_ctrl_api.h>
56#include <cdp_txrx_stats.h>
57#include <ol_txrx_osif_api.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080058/* header files for our internal definitions */
59#include <ol_txrx_internal.h> /* TXRX_ASSERT, etc. */
60#include <wdi_event.h> /* WDI events */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080061#include <ol_tx.h> /* ol_tx_ll */
62#include <ol_rx.h> /* ol_rx_deliver */
63#include <ol_txrx_peer_find.h> /* ol_txrx_peer_find_attach, etc. */
64#include <ol_rx_pn.h> /* ol_rx_pn_check, etc. */
65#include <ol_rx_fwd.h> /* ol_rx_fwd_check, etc. */
66#include <ol_rx_reorder_timeout.h> /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
67#include <ol_rx_reorder.h>
68#include <ol_tx_send.h> /* ol_tx_discard_target_frms */
69#include <ol_tx_desc.h> /* ol_tx_desc_frame_free */
70#include <ol_tx_queue.h>
Siddarth Poddarb2011f62016-04-27 20:45:42 +053071#include <ol_tx_sched.h> /* ol_tx_sched_attach, etc. */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080072#include <ol_txrx.h>
Manjunathappa Prakash04f26442016-10-13 14:46:49 -070073#include <ol_txrx_types.h>
Dhanashri Atreb08959a2016-03-01 17:28:03 -080074#include <cdp_txrx_flow_ctrl_legacy.h>
Jeff Johnsonf89f58f2016-10-14 09:58:29 -070075#include <cdp_txrx_bus.h>
Dhanashri Atreb08959a2016-03-01 17:28:03 -080076#include <cdp_txrx_ipa.h>
Jeff Johnsonf89f58f2016-10-14 09:58:29 -070077#include <cdp_txrx_pmf.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080078#include "wma.h"
Poddar, Siddarth27b1a602016-04-29 11:01:33 +053079#include "hif.h"
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -070080#include <cdp_txrx_peer_ops.h>
Komal Seelamc4b28632016-02-03 15:02:18 +053081#ifndef REMOVE_PKT_LOG
82#include "pktlog_ac.h"
83#endif
Tushnim Bhattacharyya12b48742017-03-13 12:46:45 -070084#include <wlan_policy_mgr_api.h>
Komal Seelamc4b28632016-02-03 15:02:18 +053085#include "epping_main.h"
Govind Singh8c46db92016-05-10 14:17:16 +053086#include <a_types.h>
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080087#include <cdp_txrx_handle.h>
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -080088#include <htt_internal.h>
Yun Parkb4f591d2017-03-29 15:51:01 -070089#include <ol_txrx_ipa.h>
90
Leo Chang98726762016-10-28 11:07:18 -070091#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
92ol_txrx_peer_handle
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080093ol_txrx_peer_find_by_local_id(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -070094 uint8_t local_peer_id);
95
96#endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080097QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -070098 uint8_t *peer_mac,
99 enum ol_txrx_peer_state state);
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800100static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *vdev,
101 bool val);
102int ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
Leo Chang98726762016-10-28 11:07:18 -0700103extern void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800104ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
Leo Chang98726762016-10-28 11:07:18 -0700105 struct ol_tx_wmm_param_t wmm_param);
Leo Chang98726762016-10-28 11:07:18 -0700106
Leo Chang98726762016-10-28 11:07:18 -0700107extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
108 uint64_t **last_pn, uint32_t **rmf_pn_replays);
109
Mohit Khanna78cb6bb2017-03-31 17:05:14 -0700110/* thresh for peer's cached buf queue beyond which the elements are dropped */
111#define OL_TXRX_CACHED_BUFQ_THRESH 128
112
Yu Wang053d3e72017-02-08 18:48:24 +0800113#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530114
115/**
116 * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
117 * @vdev: the data virtual device
118 * @bss_addr: bss address
119 *
120 * Return: None
121 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800122static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800123ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530124{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800125 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Yun Parkeaea8632017-04-09 09:53:45 -0700126
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530127 if (bss_addr && vdev->last_real_peer &&
Ankit Guptaa5076012016-09-14 11:32:19 -0700128 !qdf_mem_cmp((u8 *)bss_addr,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530129 vdev->last_real_peer->mac_addr.raw,
Ankit Guptaa5076012016-09-14 11:32:19 -0700130 IEEE80211_ADDR_LEN))
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530131 qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
132 vdev->last_real_peer->mac_addr.raw,
133 OL_TXRX_MAC_ADDR_LEN);
134}
135
136/**
 137 * ol_txrx_add_last_real_peer() - set the vdev's last real peer if missing
138 * @pdev: the data physical device
139 * @vdev: virtual device
140 * @peer_id: peer id
141 *
142 * Return: None
143 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800144static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800145ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
146 struct cdp_vdev *pvdev, uint8_t *peer_id)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530147{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800148 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
149 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530150 ol_txrx_peer_handle peer;
Yun Parkeaea8632017-04-09 09:53:45 -0700151
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530152 if (vdev->last_real_peer == NULL) {
Yun Parkeaea8632017-04-09 09:53:45 -0700153 peer = NULL;
154 peer = ol_txrx_find_peer_by_addr(
155 (struct cdp_pdev *)pdev,
156 vdev->hl_tdls_ap_mac_addr.raw,
157 peer_id);
158 if (peer && (peer->peer_ids[0] !=
159 HTT_INVALID_PEER_ID))
160 vdev->last_real_peer = peer;
161 }
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530162}
163
164/**
 165 * is_vdev_restore_last_peer() - check if peer is the vdev's last real peer
166 * @peer: peer object
167 *
 168 * Return: true if the vdev's last real peer is set and matches the peer
169 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800170static bool
Leo Chang98726762016-10-28 11:07:18 -0700171is_vdev_restore_last_peer(void *ppeer)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530172{
Leo Chang98726762016-10-28 11:07:18 -0700173 struct ol_txrx_peer_t *peer = ppeer;
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530174 struct ol_txrx_vdev_t *vdev;
Yun Parkeaea8632017-04-09 09:53:45 -0700175
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530176 vdev = peer->vdev;
177 return vdev->last_real_peer && (vdev->last_real_peer == peer);
178}
179
180/**
 181 * ol_txrx_update_last_real_peer() - restore the vdev's last real peer if needed
182 * @pdev: the data physical device
183 * @peer: peer device
184 * @peer_id: peer id
185 * @restore_last_peer: restore last peer flag
186 *
187 * Return: None
188 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800189static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800190ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *ppeer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530191 uint8_t *peer_id, bool restore_last_peer)
192{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800193 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -0700194 struct ol_txrx_peer_t *peer = ppeer;
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530195 struct ol_txrx_vdev_t *vdev;
Yun Parkeaea8632017-04-09 09:53:45 -0700196
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530197 vdev = peer->vdev;
198 if (restore_last_peer && (vdev->last_real_peer == NULL)) {
199 peer = NULL;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800200 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530201 vdev->hl_tdls_ap_mac_addr.raw, peer_id);
202 if (peer && (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
203 vdev->last_real_peer = peer;
204 }
205}
206#endif
207
Himanshu Agarwal19141bb2016-07-20 20:15:48 +0530208/**
209 * ol_tx_mark_first_wakeup_packet() - set flag to indicate that
 210 * fw supports marking the first packet after wow wakeup
211 * @value: 1 for enabled/ 0 for disabled
212 *
213 * Return: None
214 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800215static void ol_tx_mark_first_wakeup_packet(uint8_t value)
Himanshu Agarwal19141bb2016-07-20 20:15:48 +0530216{
217 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
218
219 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530220 ol_txrx_err(
Himanshu Agarwal19141bb2016-07-20 20:15:48 +0530221 "%s: pdev is NULL\n", __func__);
222 return;
223 }
224
225 htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
226}
227
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530228u_int16_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800229ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530230{
231 u_int16_t desc_pool_size;
232 u_int16_t steady_state_tx_lifetime_ms;
233 u_int16_t safety_factor;
234
235 /*
236 * Steady-state tx latency:
237 * roughly 1-2 ms flight time
238 * + roughly 1-2 ms prep time,
239 * + roughly 1-2 ms target->host notification time.
240 * = roughly 6 ms total
241 * Thus, steady state number of frames =
242 * steady state max throughput / frame size * tx latency, e.g.
243 * 1 Gbps / 1500 bytes * 6 ms = 500
244 *
245 */
246 steady_state_tx_lifetime_ms = 6;
247
248 safety_factor = 8;
249
250 desc_pool_size =
251 ol_cfg_max_thruput_mbps(ctrl_pdev) *
252 1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
253 (8 * OL_TX_AVG_FRM_BYTES) *
254 steady_state_tx_lifetime_ms *
255 safety_factor;
256
257 /* minimum */
258 if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
259 desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;
260
261 /* maximum */
262 if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
263 desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;
264
265 return desc_pool_size;
266}
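
/*
 * Worked example for the sizing above (illustrative only; the real numbers
 * depend on the platform configuration): with a configured max throughput of
 * 1000 Mbps, and assuming OL_TX_AVG_FRM_BYTES is 1500, 1000 * 1000 /
 * (8 * 1500) gives roughly 83 frames per ms, so 83 * 6 ms * safety factor 8
 * is about 4000 descriptors, which is then clamped into the
 * [OL_TX_DESC_POOL_SIZE_MIN_HL, OL_TX_DESC_POOL_SIZE_MAX_HL] range.
 */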
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800267
268/*=== function definitions ===*/
269
Nirav Shah22bf44d2015-12-10 15:39:48 +0530270/**
271 * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate that mgmt over
272 * wmi is enabled or not.
 273 * @value: 1 for enabled/ 0 for disabled
274 *
275 * Return: None
276 */
277void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
278{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530279 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Yun Parkeaea8632017-04-09 09:53:45 -0700280
Nirav Shah22bf44d2015-12-10 15:39:48 +0530281 if (!pdev) {
Anurag Chouhan6d760662016-02-20 16:05:43 +0530282 qdf_print("%s: pdev is NULL\n", __func__);
Nirav Shah22bf44d2015-12-10 15:39:48 +0530283 return;
284 }
285 pdev->is_mgmt_over_wmi_enabled = value;
Nirav Shah22bf44d2015-12-10 15:39:48 +0530286}
287
288/**
289 * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
290 *
291 * Return: is_mgmt_over_wmi_enabled
292 */
293uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
294{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530295 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Yun Parkeaea8632017-04-09 09:53:45 -0700296
Nirav Shah22bf44d2015-12-10 15:39:48 +0530297 if (!pdev) {
Anurag Chouhan6d760662016-02-20 16:05:43 +0530298 qdf_print("%s: pdev is NULL\n", __func__);
Nirav Shah22bf44d2015-12-10 15:39:48 +0530299 return 0;
300 }
301 return pdev->is_mgmt_over_wmi_enabled;
302}
303
304
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800305#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800306static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800307ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
308 struct cdp_vdev *pvdev, uint8_t *peer_addr, uint8_t *peer_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800309{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800310 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
311 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800312 struct ol_txrx_peer_t *peer;
313
314 peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
315 if (!peer)
316 return NULL;
317 *peer_id = peer->local_id;
Mohit Khannababadb82017-02-21 18:54:19 -0800318 OL_TXRX_PEER_UNREF_DELETE(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800319 return peer;
320}
321
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800322static QDF_STATUS ol_txrx_get_vdevid(void *ppeer, uint8_t *vdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800323{
Leo Chang98726762016-10-28 11:07:18 -0700324 struct ol_txrx_peer_t *peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800325 if (!peer) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530326 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530327 "peer argument is null!!");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530328 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800329 }
330
331 *vdev_id = peer->vdev->vdev_id;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530332 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800333}
334
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800335static struct cdp_vdev *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800336{
337 struct ol_txrx_peer_t *peer = NULL;
338 ol_txrx_pdev_handle pdev = NULL;
339
340 if (sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530341 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800342 "Invalid sta id passed");
343 return NULL;
344 }
345
Anurag Chouhan6d760662016-02-20 16:05:43 +0530346 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800347 if (!pdev) {
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530348 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530349 "PDEV not found for sta_id [%d]", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800350 return NULL;
351 }
352
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800353 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800354 if (!peer) {
Poddar, Siddarth31b9b8b2017-04-07 12:04:55 +0530355 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530356 "PEER [%d] not found", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800357 return NULL;
358 }
359
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800360 return (struct cdp_vdev *)peer->vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800361}
362
Mohit Khannababadb82017-02-21 18:54:19 -0800363/**
364 * ol_txrx_find_peer_by_addr() - find peer via peer mac addr and peer_id
365 * @ppdev: pointer of type cdp_pdev
366 * @peer_addr: peer mac addr
367 * @peer_id: pointer to fill in the value of peer->local_id for caller
368 *
369 * This function finds a peer with given mac address and returns its peer_id.
370 * Note that this function does not increment the peer->ref_cnt.
371 * This means that the peer may be deleted in some other parallel context after
 372 * it has been found.
373 *
374 * Return: peer handle if peer is found, NULL if peer is not found.
375 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800376void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800377 uint8_t *peer_addr,
378 uint8_t *peer_id)
379{
380 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800381 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800382
Mohit Khannababadb82017-02-21 18:54:19 -0800383 peer = ol_txrx_peer_find_hash_find_inc_ref(pdev, peer_addr, 0, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800384 if (!peer)
385 return NULL;
386 *peer_id = peer->local_id;
Mohit Khannababadb82017-02-21 18:54:19 -0800387 OL_TXRX_PEER_UNREF_DELETE(peer);
388 return peer;
389}
390
391/**
392 * ol_txrx_find_peer_by_addr_inc_ref() - find peer via peer mac addr and peer_id
393 * @pdev: pointer of type ol_txrx_pdev_handle
394 * @peer_addr: peer mac addr
395 * @peer_id: pointer to fill in the value of peer->local_id for caller
396 *
397 * This function finds the peer with given mac address and returns its peer_id.
398 * Note that this function increments the peer->ref_cnt.
399 * This makes sure that peer will be valid. This also means the caller needs to
400 * call the corresponding API - OL_TXRX_PEER_UNREF_DELETE to delete the peer
401 * reference.
402 * Sample usage:
403 * {
404 * //the API call below increments the peer->ref_cnt
405 * peer = ol_txrx_find_peer_by_addr_inc_ref(pdev, peer_addr, peer_id);
406 *
407 * // Once peer usage is done
408 *
409 * //the API call below decrements the peer->ref_cnt
410 * OL_TXRX_PEER_UNREF_DELETE(peer);
411 * }
412 *
413 * Return: peer handle if the peer is found, NULL if peer is not found.
414 */
415ol_txrx_peer_handle ol_txrx_find_peer_by_addr_inc_ref(ol_txrx_pdev_handle pdev,
416 uint8_t *peer_addr,
417 uint8_t *peer_id)
418{
419 struct ol_txrx_peer_t *peer;
420
421 peer = ol_txrx_peer_find_hash_find_inc_ref(pdev, peer_addr, 0, 1);
422 if (!peer)
423 return NULL;
424 *peer_id = peer->local_id;
Mohit Khannab04dfcd2017-02-13 18:54:35 -0800425 return peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800426}
427
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800428static uint16_t ol_txrx_local_peer_id(void *ppeer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800429{
Leo Chang98726762016-10-28 11:07:18 -0700430 ol_txrx_peer_handle peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800431 return peer->local_id;
432}
433
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -0700434/**
435 * @brief Find a txrx peer handle from a peer's local ID
436 * @details
437 * The control SW typically uses the txrx peer handle to refer to the peer.
 438 * In unusual circumstances, if it is infeasible for the control SW to maintain
439 * the txrx peer handle but it can maintain a small integer local peer ID,
 440 * this function allows the peer handle to be retrieved, based on the local
441 * peer ID.
442 *
443 * @param pdev - the data physical device object
444 * @param local_peer_id - the ID txrx assigned locally to the peer in question
445 * @return handle to the txrx peer object
446 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800447ol_txrx_peer_handle
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800448ol_txrx_peer_find_by_local_id(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800449 uint8_t local_peer_id)
450{
451 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800452 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Yun Parkeaea8632017-04-09 09:53:45 -0700453
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800454 if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
455 (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
456 return NULL;
457 }
458
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530459 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800460 peer = pdev->local_peer_ids.map[local_peer_id];
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530461 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800462 return peer;
463}
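
/*
 * Illustrative round trip (sketch, not a prescribed call flow): a caller that
 * only stored the small integer written to *peer_id by
 * ol_txrx_find_peer_by_addr() can later recover the handle with
 * ol_txrx_peer_find_by_local_id(pdev, local_peer_id). Note that neither call
 * leaves a reference held on return, so the peer may be deleted by a parallel
 * context before the handle is used.
 */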
464
465static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
466{
467 int i;
468
469 /* point the freelist to the first ID */
470 pdev->local_peer_ids.freelist = 0;
471
472 /* link each ID to the next one */
473 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
474 pdev->local_peer_ids.pool[i] = i + 1;
475 pdev->local_peer_ids.map[i] = NULL;
476 }
477
478 /* link the last ID to itself, to mark the end of the list */
479 i = OL_TXRX_NUM_LOCAL_PEER_IDS;
480 pdev->local_peer_ids.pool[i] = i;
481
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530482 qdf_spinlock_create(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800483}
484
485static void
486ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
487 struct ol_txrx_peer_t *peer)
488{
489 int i;
490
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530491 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800492 i = pdev->local_peer_ids.freelist;
493 if (pdev->local_peer_ids.pool[i] == i) {
494 /* the list is empty, except for the list-end marker */
495 peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
496 } else {
497 /* take the head ID and advance the freelist */
498 peer->local_id = i;
499 pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
500 pdev->local_peer_ids.map[i] = peer;
501 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530502 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800503}
504
505static void
506ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
507 struct ol_txrx_peer_t *peer)
508{
509 int i = peer->local_id;
Yun Parkeaea8632017-04-09 09:53:45 -0700510
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800511 if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
512 (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
513 return;
514 }
515 /* put this ID on the head of the freelist */
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530516 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800517 pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
518 pdev->local_peer_ids.freelist = i;
519 pdev->local_peer_ids.map[i] = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530520 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800521}
522
523static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
524{
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530525 qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800526}
527
528#else
529#define ol_txrx_local_peer_id_pool_init(pdev) /* no-op */
530#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
531#define ol_txrx_local_peer_id_free(pdev, peer) /* no-op */
532#define ol_txrx_local_peer_id_cleanup(pdev) /* no-op */
533#endif
534
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530535#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
536
537/**
538 * ol_txrx_update_group_credit() - update group credit for tx queue
539 * @group: for which credit needs to be updated
540 * @credit: credits
541 * @absolute: TXQ group absolute
542 *
 543 * Return: None
544 */
545void ol_txrx_update_group_credit(
546 struct ol_tx_queue_group_t *group,
547 int32_t credit,
548 u_int8_t absolute)
549{
550 if (absolute)
551 qdf_atomic_set(&group->credit, credit);
552 else
553 qdf_atomic_add(credit, &group->credit);
554}
555
556/**
557 * ol_txrx_update_tx_queue_groups() - update vdev tx queue group if
 558 * vdev id mask and ac mask do not match
559 * @pdev: the data physical device
560 * @group_id: TXQ group id
561 * @credit: TXQ group credit count
562 * @absolute: TXQ group absolute
563 * @vdev_id_mask: TXQ vdev group id mask
564 * @ac_mask: TQX access category mask
565 *
566 * Return: None
567 */
568void ol_txrx_update_tx_queue_groups(
569 ol_txrx_pdev_handle pdev,
570 u_int8_t group_id,
571 int32_t credit,
572 u_int8_t absolute,
573 u_int32_t vdev_id_mask,
574 u_int32_t ac_mask
575 )
576{
577 struct ol_tx_queue_group_t *group;
578 u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
579 u_int32_t membership;
580 struct ol_txrx_vdev_t *vdev;
Yun Parkeaea8632017-04-09 09:53:45 -0700581
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530582 group = &pdev->txq_grps[group_id];
583
584 membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);
585
586 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
587 /*
 588 * If the membership (vdev id mask and ac mask)
 589 * matches, then there is no need to update the tx queue groups.
590 */
591 if (group->membership == membership)
592 /* Update Credit Only */
593 goto credit_update;
594
595
596 /*
 597 * membership (vdev id mask and ac mask) does not match
598 * TODO: ignoring ac mask for now
599 */
600 group_vdev_id_mask =
601 OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
602
603 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
604 group_vdev_bit_mask =
605 OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
606 group_vdev_id_mask, vdev->vdev_id);
607 vdev_bit_mask =
608 OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
609 vdev_id_mask, vdev->vdev_id);
610
611 if (group_vdev_bit_mask != vdev_bit_mask) {
612 /*
613 * Change in vdev tx queue group
614 */
615 if (!vdev_bit_mask) {
616 /* Set Group Pointer (vdev and peer) to NULL */
617 ol_tx_set_vdev_group_ptr(
618 pdev, vdev->vdev_id, NULL);
619 } else {
620 /* Set Group Pointer (vdev and peer) */
621 ol_tx_set_vdev_group_ptr(
622 pdev, vdev->vdev_id, group);
623 }
624 }
625 }
626 /* Update membership */
627 group->membership = membership;
628credit_update:
629 /* Update Credit */
630 ol_txrx_update_group_credit(group, credit, absolute);
631 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
632}
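
/*
 * Example (hypothetical values; the exact bit layout of the membership word
 * is defined by OL_TXQ_GROUP_MEMBERSHIP_GET): a credit update carrying
 * group_id 0, vdev_id_mask 0x3 and ac_mask 0xf asks that group 0 serve all
 * four access categories of vdevs 0 and 1. If that differs from the group's
 * current membership, each vdev's group pointer is re-evaluated above before
 * the credit itself is updated.
 */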
633#endif
634
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800635#ifdef WLAN_FEATURE_FASTPATH
636/**
 637 * setup_fastpath_ce_handles() - update pdev with ce_handle for fastpath use.
638 *
639 * @osc: pointer to HIF context
640 * @pdev: pointer to ol pdev
641 *
642 * Return: void
643 */
Komal Seelam3d202862016-02-24 18:43:24 +0530644static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
645 struct ol_txrx_pdev_t *pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800646{
647 /*
648 * Before the HTT attach, set up the CE handles
649 * CE handles are (struct CE_state *)
650 * This is only required in the fast path
651 */
Komal Seelam7fde14c2016-02-02 13:05:57 +0530652 pdev->ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_H2T_MSG);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800653
654}
655
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800656#else /* not WLAN_FEATURE_FASTPATH */
Komal Seelam3d202862016-02-24 18:43:24 +0530657static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
658 struct ol_txrx_pdev_t *pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800659{
660}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800661#endif /* WLAN_FEATURE_FASTPATH */
662
663#ifdef QCA_LL_TX_FLOW_CONTROL_V2
664/**
665 * ol_tx_set_desc_global_pool_size() - set global pool size
666 * @num_msdu_desc: total number of descriptors
667 *
668 * Return: none
669 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800670static void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800671{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530672 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Yun Parkeaea8632017-04-09 09:53:45 -0700673
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800674 if (!pdev) {
Anurag Chouhan6d760662016-02-20 16:05:43 +0530675 qdf_print("%s: pdev is NULL\n", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800676 return;
677 }
Nirav Shah2ae038d2015-12-23 20:36:11 +0530678 pdev->num_msdu_desc = num_msdu_desc;
679 if (!ol_tx_get_is_mgmt_over_wmi_enabled())
680 pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
Kapil Gupta53d9b572017-06-28 17:53:25 +0530681 ol_txrx_info_high("Global pool size: %d\n",
Nirav Shah2ae038d2015-12-23 20:36:11 +0530682 pdev->num_msdu_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800683}
684
685/**
686 * ol_tx_get_desc_global_pool_size() - get global pool size
687 * @pdev: pdev handle
688 *
689 * Return: global pool size
690 */
691static inline
692uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
693{
694 return pdev->num_msdu_desc;
695}
Nirav Shah55b45a02016-01-21 10:00:16 +0530696
697/**
698 * ol_tx_get_total_free_desc() - get total free descriptors
699 * @pdev: pdev handle
700 *
701 * Return: total free descriptors
702 */
703static inline
704uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
705{
706 struct ol_tx_flow_pool_t *pool = NULL;
707 uint32_t free_desc;
708
709 free_desc = pdev->tx_desc.num_free;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530710 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
Nirav Shah55b45a02016-01-21 10:00:16 +0530711 TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
712 flow_pool_list_elem) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530713 qdf_spin_lock_bh(&pool->flow_pool_lock);
Nirav Shah55b45a02016-01-21 10:00:16 +0530714 free_desc += pool->avail_desc;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530715 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Nirav Shah55b45a02016-01-21 10:00:16 +0530716 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530717 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
Nirav Shah55b45a02016-01-21 10:00:16 +0530718
719 return free_desc;
720}
721
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800722#else
723/**
724 * ol_tx_get_desc_global_pool_size() - get global pool size
725 * @pdev: pdev handle
726 *
727 * Return: global pool size
728 */
729static inline
730uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
731{
732 return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
733}
Nirav Shah55b45a02016-01-21 10:00:16 +0530734
735/**
736 * ol_tx_get_total_free_desc() - get total free descriptors
737 * @pdev: pdev handle
738 *
739 * Return: total free descriptors
740 */
741static inline
742uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
743{
744 return pdev->tx_desc.num_free;
745}
746
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800747#endif
748
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530749#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)
750
751/**
752 * ol_txrx_rsrc_threshold_lo() - set threshold low - when to start tx desc
753 * margin replenishment
754 * @desc_pool_size: tx desc pool size
755 *
756 * Return: threshold low
757 */
758static inline uint16_t
759ol_txrx_rsrc_threshold_lo(int desc_pool_size)
760{
761 int threshold_low;
Yun Parkeaea8632017-04-09 09:53:45 -0700762
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530763 /*
Yun Parkeaea8632017-04-09 09:53:45 -0700764 * A 5% margin of unallocated descriptors is too much for the
 765 * per-vdev mechanism.
 766 * Define the value separately.
767 */
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530768 threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;
769
770 return threshold_low;
771}
772
773/**
774 * ol_txrx_rsrc_threshold_hi() - set threshold high - where to stop
775 * during tx desc margin replenishment
776 * @desc_pool_size: tx desc pool size
777 *
778 * Return: threshold high
779 */
780static inline uint16_t
781ol_txrx_rsrc_threshold_hi(int desc_pool_size)
782{
783 int threshold_high;
784 /* when freeing up descriptors,
785 * keep going until there's a 7.5% margin
786 */
787 threshold_high = ((15 * desc_pool_size)/100)/2;
788
789 return threshold_high;
790}
791#else
792
793static inline uint16_t
794ol_txrx_rsrc_threshold_lo(int desc_pool_size)
795{
796 int threshold_low;
797 /* always maintain a 5% margin of unallocated descriptors */
798 threshold_low = (5 * desc_pool_size)/100;
799
800 return threshold_low;
801}
802
803static inline uint16_t
804ol_txrx_rsrc_threshold_hi(int desc_pool_size)
805{
806 int threshold_high;
807 /* when freeing up descriptors, keep going until
808 * there's a 15% margin
809 */
810 threshold_high = (15 * desc_pool_size)/100;
811
812 return threshold_high;
813}
814#endif
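
/*
 * Quick arithmetic check of the margins above (illustrative): with a
 * 1024-entry descriptor pool, the default build starts replenishing at a 5%
 * margin (51 free descriptors) and stops once 15% (153) are free, while the
 * per-vdev build stops at half of that, 7.5% (76).
 */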
815
816#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
817
818/**
819 * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
820 * @pdev: the physical device object
821 *
822 * Return: None
823 */
824static void
825ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
826{
827 qdf_spinlock_create(&pdev->txq_log_spinlock);
828 pdev->txq_log.size = OL_TXQ_LOG_SIZE;
829 pdev->txq_log.oldest_record_offset = 0;
830 pdev->txq_log.offset = 0;
831 pdev->txq_log.allow_wrap = 1;
832 pdev->txq_log.wrapped = 0;
833}
834
835/**
836 * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
837 * @pdev: the physical device object
838 *
839 * Return: None
840 */
841static inline void
842ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
843{
844 qdf_spinlock_destroy(&pdev->txq_log_spinlock);
845}
846
847#else
848
849static inline void
850ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
851{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530852}
853
854static inline void
855ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
856{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530857}
858
859
860#endif
861
862#if defined(DEBUG_HL_LOGGING)
863
864/**
865 * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
866 * @pdev: the physical device object
867 *
868 * Return: None
869 */
870static inline void
871ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
872{
873 qdf_spinlock_create(&pdev->grp_stat_spinlock);
874 pdev->grp_stats.last_valid_index = -1;
875 pdev->grp_stats.wrap_around = 0;
876}
877
878/**
879 * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
880 * @pdev: the physical device object
881 *
882 * Return: None
883 */
884static inline void
885ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
886{
887 qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
888}
889#else
890
891static inline void
892ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
893{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530894}
895
896static inline void
897ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
898{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530899}
900#endif
901
902#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
903
904/**
905 * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
906 * @vdev: the virtual device object
907 * @flag: flag
908 *
909 * Return: None
910 */
911void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800912ol_txrx_hl_tdls_flag_reset(struct cdp_vdev *pvdev, bool flag)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530913{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800914 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530915 vdev->hlTdlsFlag = flag;
916}
917#endif
918
919#if defined(CONFIG_HL_SUPPORT)
920
921/**
922 * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
923 * @vdev: the virtual device object
924 *
925 * Return: None
926 */
927static void
928ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
929{
930 u_int8_t i;
Yun Parkeaea8632017-04-09 09:53:45 -0700931
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530932 for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
933 TAILQ_INIT(&vdev->txqs[i].head);
934 vdev->txqs[i].paused_count.total = 0;
935 vdev->txqs[i].frms = 0;
936 vdev->txqs[i].bytes = 0;
937 vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
938 vdev->txqs[i].flag = ol_tx_queue_empty;
939 /* aggregation is not applicable for vdev tx queues */
940 vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
941 ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
942 ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
943 }
944}
945
946/**
947 * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
948 * @vdev: the virtual device object
949 *
950 * Return: None
951 */
952static void
953ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
954{
955 struct ol_txrx_pdev_t *pdev = vdev->pdev;
956 struct ol_tx_frms_queue_t *txq;
957 int i;
958
959 for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
960 txq = &vdev->txqs[i];
Poddar, Siddarth74178df2016-08-09 17:32:50 +0530961 ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530962 }
963}
964
965/**
966 * ol_txrx_peer_txqs_init() - initialise peer tx queues
967 * @pdev: the physical device object
968 * @peer: peer object
969 *
970 * Return: None
971 */
972static void
973ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
974 struct ol_txrx_peer_t *peer)
975{
976 uint8_t i;
977 struct ol_txrx_vdev_t *vdev = peer->vdev;
Yun Parkeaea8632017-04-09 09:53:45 -0700978
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530979 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
980 for (i = 0; i < OL_TX_NUM_TIDS; i++) {
981 TAILQ_INIT(&peer->txqs[i].head);
982 peer->txqs[i].paused_count.total = 0;
983 peer->txqs[i].frms = 0;
984 peer->txqs[i].bytes = 0;
985 peer->txqs[i].ext_tid = i;
986 peer->txqs[i].flag = ol_tx_queue_empty;
987 peer->txqs[i].aggr_state = ol_tx_aggr_untried;
988 ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
989 ol_txrx_set_txq_peer(&peer->txqs[i], peer);
990 }
991 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
992
993 /* aggregation is not applicable for mgmt and non-QoS tx queues */
994 for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
995 peer->txqs[i].aggr_state = ol_tx_aggr_disabled;
996
997 ol_txrx_peer_pause(peer);
998}
999
1000/**
1001 * ol_txrx_peer_tx_queue_free() - free peer tx queues
1002 * @pdev: the physical device object
1003 * @peer: peer object
1004 *
1005 * Return: None
1006 */
1007static void
1008ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
1009 struct ol_txrx_peer_t *peer)
1010{
1011 struct ol_tx_frms_queue_t *txq;
1012 uint8_t i;
1013
1014 for (i = 0; i < OL_TX_NUM_TIDS; i++) {
1015 txq = &peer->txqs[i];
Poddar, Siddarth74178df2016-08-09 17:32:50 +05301016 ol_tx_queue_free(pdev, txq, i, true);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301017 }
1018}
1019#else
1020
1021static inline void
1022ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
1023{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301024}
1025
1026static inline void
1027ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
1028{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301029}
1030
1031static inline void
1032ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
1033 struct ol_txrx_peer_t *peer)
1034{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301035}
1036
1037static inline void
1038ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
1039 struct ol_txrx_peer_t *peer)
1040{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301041}
1042#endif
1043
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301044#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
1045static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
1046{
1047 qdf_spinlock_create(&pdev->stats.pub.tx.tso.tso_stats_lock);
1048}
1049
1050static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
1051{
1052 qdf_spinlock_destroy(&pdev->stats.pub.tx.tso.tso_stats_lock);
1053}
1054
1055static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
1056{
1057 int msdu_idx;
1058 int seg_idx;
1059
Houston Hoffmanb10ec492017-08-10 15:35:25 -07001060 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301061 "TSO Statistics:");
Houston Hoffmanb10ec492017-08-10 15:35:25 -07001062 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301063 "TSO pkts %lld, bytes %lld\n",
1064 pdev->stats.pub.tx.tso.tso_pkts.pkts,
1065 pdev->stats.pub.tx.tso.tso_pkts.bytes);
1066
Houston Hoffmanb10ec492017-08-10 15:35:25 -07001067 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301068 "TSO Histogram for numbers of segments:\n"
1069 "Single segment %d\n"
1070 " 2-5 segments %d\n"
1071 " 6-10 segments %d\n"
1072 "11-15 segments %d\n"
1073 "16-20 segments %d\n"
1074 " 20+ segments %d\n",
1075 pdev->stats.pub.tx.tso.tso_hist.pkts_1,
1076 pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
1077 pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
1078 pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
1079 pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
1080 pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);
1081
Houston Hoffmanb10ec492017-08-10 15:35:25 -07001082 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301083 "TSO History Buffer: Total size %d, current_index %d",
1084 NUM_MAX_TSO_MSDUS,
1085 TXRX_STATS_TSO_MSDU_IDX(pdev));
1086
1087 for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
1088 if (TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx) == 0)
1089 continue;
Houston Hoffmanb10ec492017-08-10 15:35:25 -07001090 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301091 "jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
1092 msdu_idx,
1093 TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
1094 TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
1095 TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
1096 TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));
1097
1098 for (seg_idx = 0;
1099 ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev,
1100 msdu_idx)) && (seg_idx < NUM_MAX_TSO_SEGS));
1101 seg_idx++) {
1102 struct qdf_tso_seg_t tso_seg =
1103 TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);
1104
Houston Hoffmanb10ec492017-08-10 15:35:25 -07001105 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301106 "seg idx: %d", seg_idx);
Houston Hoffmanb10ec492017-08-10 15:35:25 -07001107 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301108 "tso_enable: %d",
1109 tso_seg.tso_flags.tso_enable);
Houston Hoffmanb10ec492017-08-10 15:35:25 -07001110 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301111 "fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
1112 tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
1113 tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
1114 tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
1115 tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
1116 tso_seg.tso_flags.ns);
Houston Hoffmanb10ec492017-08-10 15:35:25 -07001117 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301118 "tcp_seq_num: 0x%x ip_id: %d",
1119 tso_seg.tso_flags.tcp_seq_num,
1120 tso_seg.tso_flags.ip_id);
1121 }
1122 }
1123}
1124#else
1125static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
1126{
1127 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1128 "TSO is not supported\n");
1129}
1130
1131static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
1132{
1133 /*
 1134 * Keeping the body empty and not adding an error print, as the print
 1135 * would show up every time during driver load if TSO is not enabled.
1136 */
1137}
1138
1139static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
1140{
1141 /*
 1142 * Keeping the body empty and not adding an error print, as the print
 1143 * would show up every time during driver unload if TSO is not enabled.
1144 */
1145}
1146
1147#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */
1148
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001149/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08001150 * ol_txrx_pdev_attach() - allocate txrx pdev
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001151 * @ctrl_pdev: cfg pdev
1152 * @htc_pdev: HTC pdev
1153 * @osdev: os dev
1154 *
1155 * Return: txrx pdev handle
1156 * NULL for failure
1157 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001158static struct cdp_pdev *
1159ol_txrx_pdev_attach(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
Leo Chang98726762016-10-28 11:07:18 -07001160 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001161{
1162 struct ol_txrx_pdev_t *pdev;
hqufd227fe2017-06-26 17:01:14 +08001163 int i, tid;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001164
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301165 pdev = qdf_mem_malloc(sizeof(*pdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001166 if (!pdev)
1167 goto fail0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001168
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301169 /* init LL/HL cfg here */
1170 pdev->cfg.is_high_latency = ol_cfg_is_high_latency(ctrl_pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001171 pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(ctrl_pdev);
1172
1173 /* store provided params */
1174 pdev->ctrl_pdev = ctrl_pdev;
1175 pdev->osdev = osdev;
1176
1177 for (i = 0; i < htt_num_sec_types; i++)
1178 pdev->sec_types[i] = (enum ol_sec_type)i;
1179
1180 TXRX_STATS_INIT(pdev);
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301181 ol_txrx_tso_stats_init(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001182
1183 TAILQ_INIT(&pdev->vdev_list);
1184
tfyu9fcabd72017-09-26 17:46:48 +08001185 TAILQ_INIT(&pdev->req_list);
1186 pdev->req_list_depth = 0;
1187 qdf_spinlock_create(&pdev->req_list_spinlock);
1188
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001189 /* do initial set up of the peer ID -> peer object lookup map */
1190 if (ol_txrx_peer_find_attach(pdev))
1191 goto fail1;
1192
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301193 /* initialize the counter of the target's tx buffer availability */
1194 qdf_atomic_init(&pdev->target_tx_credit);
1195 qdf_atomic_init(&pdev->orig_target_tx_credit);
1196
1197 if (ol_cfg_is_high_latency(ctrl_pdev)) {
1198 qdf_spinlock_create(&pdev->tx_queue_spinlock);
1199 pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
1200 if (pdev->tx_sched.scheduler == NULL)
1201 goto fail2;
1202 }
1203 ol_txrx_pdev_txq_log_init(pdev);
1204 ol_txrx_pdev_grp_stats_init(pdev);
1205
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001206 pdev->htt_pdev =
1207 htt_pdev_alloc(pdev, ctrl_pdev, htc_pdev, osdev);
1208 if (!pdev->htt_pdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301209 goto fail3;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001210
Himanshu Agarwalf65bd4c2016-12-05 17:21:12 +05301211 htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
1212 ol_rx_pkt_dump_call);
hqufd227fe2017-06-26 17:01:14 +08001213
1214 /*
1215 * Init the tid --> category table.
1216 * Regular tids (0-15) map to their AC.
1217 * Extension tids get their own categories.
1218 */
1219 for (tid = 0; tid < OL_TX_NUM_QOS_TIDS; tid++) {
1220 int ac = TXRX_TID_TO_WMM_AC(tid);
1221
1222 pdev->tid_to_ac[tid] = ac;
1223 }
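	/*
	 * With the usual WMM UP-to-AC mapping this makes tids 0 and 3 best
	 * effort, 1 and 2 background, 4 and 5 video, and 6 and 7 voice (the
	 * authoritative mapping is whatever TXRX_TID_TO_WMM_AC() encodes).
	 */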
1224 pdev->tid_to_ac[OL_TX_NON_QOS_TID] =
1225 OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;
1226 pdev->tid_to_ac[OL_TX_MGMT_TID] =
1227 OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
1228 pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST] =
1229 OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
1230 pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
1231 OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;
1232
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001233 return (struct cdp_pdev *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001234
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301235fail3:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001236 ol_txrx_peer_find_detach(pdev);
1237
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301238fail2:
1239 if (ol_cfg_is_high_latency(ctrl_pdev))
1240 qdf_spinlock_destroy(&pdev->tx_queue_spinlock);
1241
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001242fail1:
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301243 ol_txrx_tso_stats_deinit(pdev);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301244 qdf_mem_free(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001245
1246fail0:
1247 return NULL;
1248}
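
/*
 * Typical bring-up order (sketch only, simplified from how the layers above
 * use these entry points): the cfg/HTC/os handles are passed to
 * ol_txrx_pdev_attach(), and only if that returns a non-NULL cdp_pdev is
 * ol_txrx_pdev_post_attach() invoked to allocate the tx descriptor pool and
 * attach HTT; a non-zero return from post-attach means the pdev must be torn
 * down again.
 */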
1249
Komal Seelamc4b28632016-02-03 15:02:18 +05301250#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
1251/**
1252 * htt_pkt_log_init() - API to initialize packet log
1253 * @handle: pdev handle
1254 * @scn: HIF context
1255 *
1256 * Return: void
1257 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001258void htt_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
Komal Seelamc4b28632016-02-03 15:02:18 +05301259{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001260 struct ol_txrx_pdev_t *handle = (struct ol_txrx_pdev_t *)ppdev;
Komal Seelamc4b28632016-02-03 15:02:18 +05301261 if (handle->pkt_log_init)
1262 return;
1263
Anurag Chouhandf2b2682016-02-29 14:15:27 +05301264 if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
Houston Hoffman371d4a92016-04-14 17:02:37 -07001265 !QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
Komal Seelamc4b28632016-02-03 15:02:18 +05301266 ol_pl_sethandle(&handle->pl_dev, scn);
1267 if (pktlogmod_init(scn))
Anurag Chouhandf2b2682016-02-29 14:15:27 +05301268 qdf_print("%s: pktlogmod_init failed", __func__);
Komal Seelamc4b28632016-02-03 15:02:18 +05301269 else
1270 handle->pkt_log_init = true;
1271 }
1272}
1273
1274/**
1275 * htt_pktlogmod_exit() - API to cleanup pktlog info
1276 * @handle: Pdev handle
1277 * @scn: HIF Context
1278 *
1279 * Return: void
1280 */
Houston Hoffman8c485042017-02-08 13:40:21 -08001281static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
Komal Seelamc4b28632016-02-03 15:02:18 +05301282{
Houston Hoffman8c485042017-02-08 13:40:21 -08001283 if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
Houston Hoffman371d4a92016-04-14 17:02:37 -07001284 !QDF_IS_EPPING_ENABLED(cds_get_conparam()) &&
Komal Seelamc4b28632016-02-03 15:02:18 +05301285 handle->pkt_log_init) {
Houston Hoffman8c485042017-02-08 13:40:21 -08001286 pktlogmod_exit(handle);
Komal Seelamc4b28632016-02-03 15:02:18 +05301287 handle->pkt_log_init = false;
1288 }
1289}
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001290
Komal Seelamc4b28632016-02-03 15:02:18 +05301291#else
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001292void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *ol_sc) { }
Houston Hoffman8c485042017-02-08 13:40:21 -08001293static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
Komal Seelamc4b28632016-02-03 15:02:18 +05301294#endif
1295
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001296/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08001297 * ol_txrx_pdev_post_attach() - attach txrx pdev
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001298 * @pdev: txrx pdev
1299 *
1300 * Return: 0 for success
1301 */
1302int
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001303ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001304{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001305 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001306 uint16_t i;
1307 uint16_t fail_idx = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001308 int ret = 0;
1309 uint16_t desc_pool_size;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301310 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001311
Leo Chang376398b2015-10-23 14:19:02 -07001312 uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
1313 union ol_tx_desc_list_elem_t *c_element;
1314 unsigned int sig_bit;
1315 uint16_t desc_per_page;
1316
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001317 if (!osc) {
1318 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001319 goto ol_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001320 }
1321
1322 /*
1323 * For LL, limit the number of host's tx descriptors to match
1324 * the number of target FW tx descriptors.
1325 * This simplifies the FW, by ensuring the host will never
1326 * download more tx descriptors than the target has space for.
1327 * The FW will drop/free low-priority tx descriptors when it
1328 * starts to run low, so that in theory the host should never
1329 * run out of tx descriptors.
1330 */
1331
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001332 /*
 1333 * LL - initialize the target credit ourselves.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301334 * HL - wait for a HTT target credit initialization
1335 * during htt_attach.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001336 */
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301337 if (pdev->cfg.is_high_latency) {
1338 desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001339
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301340 qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
1341 qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001342
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301343 pdev->tx_queue.rsrc_threshold_lo =
1344 ol_txrx_rsrc_threshold_lo(desc_pool_size);
1345 pdev->tx_queue.rsrc_threshold_hi =
1346 ol_txrx_rsrc_threshold_hi(desc_pool_size);
1347
1348 for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++)
1349 qdf_atomic_init(&pdev->txq_grps[i].credit);
1350
1351 ol_tx_target_credit_init(pdev, desc_pool_size);
1352 } else {
1353 qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
1354 &pdev->target_tx_credit);
1355 desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
1356 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001357
Nirav Shah76291962016-04-25 10:50:37 +05301358 ol_tx_desc_dup_detect_init(pdev, desc_pool_size);
1359
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001360 setup_fastpath_ce_handles(osc, pdev);
1361
1362 ret = htt_attach(pdev->htt_pdev, desc_pool_size);
1363 if (ret)
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301364 goto htt_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001365
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001366 /* Attach micro controller data path offload resource */
Yun Parkf01f6e22017-01-18 17:27:02 -08001367 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
1368 ret = htt_ipa_uc_attach(pdev->htt_pdev);
1369 if (ret)
Leo Chang376398b2015-10-23 14:19:02 -07001370 goto uc_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001371 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001372
Leo Chang376398b2015-10-23 14:19:02 -07001373 /* Calculate single element reserved size power of 2 */
Anurag Chouhanc5548422016-02-24 18:33:27 +05301374 pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301375 qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
Leo Chang376398b2015-10-23 14:19:02 -07001376 pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
1377 if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
1378 (NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301379 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001380 "Page alloc fail");
Yun Parkf01f6e22017-01-18 17:27:02 -08001381 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001382 goto page_alloc_fail;
1383 }
1384 desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
1385 pdev->tx_desc.offset_filter = desc_per_page - 1;
1386 /* Calculate page divider to find page number */
1387 sig_bit = 0;
1388 while (desc_per_page) {
1389 sig_bit++;
1390 desc_per_page = desc_per_page >> 1;
1391 }
1392 pdev->tx_desc.page_divider = (sig_bit - 1);
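	/*
	 * Example (illustrative only, assuming ol_tx_desc_find() derives the
	 * page index as id >> page_divider and the offset within the page as
	 * id & offset_filter): with 64 descriptors per page the loop above
	 * exits with sig_bit = 7, so page_divider = 6 and offset_filter = 63.
	 * Descriptor id 130 then maps to page 130 >> 6 = 2, element
	 * 130 & 63 = 2.
	 */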
Srinivas Girigowdab8ecec22017-03-09 15:02:59 -08001393 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Leo Chang376398b2015-10-23 14:19:02 -07001394 "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
1395 pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
1396 desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
1397 pdev->tx_desc.desc_pages.num_element_per_page);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001398
1399 /*
1400 * Each SW tx desc (used only within the tx datapath SW) has a
1401 * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
1402 * Go ahead and allocate the HTT tx desc and link it with the SW tx
1403 * desc now, to avoid doing it during time-critical transmit.
1404 */
1405 pdev->tx_desc.pool_size = desc_pool_size;
Leo Chang376398b2015-10-23 14:19:02 -07001406 pdev->tx_desc.freelist =
1407 (union ol_tx_desc_list_elem_t *)
1408 (*pdev->tx_desc.desc_pages.cacheable_pages);
1409 c_element = pdev->tx_desc.freelist;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001410 for (i = 0; i < desc_pool_size; i++) {
1411 void *htt_tx_desc;
Leo Chang376398b2015-10-23 14:19:02 -07001412 void *htt_frag_desc = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301413 qdf_dma_addr_t frag_paddr = 0;
1414 qdf_dma_addr_t paddr;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001415
Leo Chang376398b2015-10-23 14:19:02 -07001416 if (i == (desc_pool_size - 1))
1417 c_element->next = NULL;
1418 else
1419 c_element->next = (union ol_tx_desc_list_elem_t *)
1420 ol_tx_desc_find(pdev, i + 1);
1421
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001422 htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001423 if (!htt_tx_desc) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301424 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001425 "%s: failed to alloc HTT tx desc (%d of %d)",
1426 __func__, i, desc_pool_size);
Leo Chang376398b2015-10-23 14:19:02 -07001427 fail_idx = i;
Yun Parkf01f6e22017-01-18 17:27:02 -08001428 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001429 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001430 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001431
Leo Chang376398b2015-10-23 14:19:02 -07001432 c_element->tx_desc.htt_tx_desc = htt_tx_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001433 c_element->tx_desc.htt_tx_desc_paddr = paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001434 ret = htt_tx_frag_alloc(pdev->htt_pdev,
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001435 i, &frag_paddr, &htt_frag_desc);
Leo Chang376398b2015-10-23 14:19:02 -07001436 if (ret) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301437 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001438 "%s: failed to alloc HTT frag dsc (%d/%d)",
1439 __func__, i, desc_pool_size);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001440 /* Is there a leak here, is this handling correct? */
Leo Chang376398b2015-10-23 14:19:02 -07001441 fail_idx = i;
1442 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001443 }
Leo Chang376398b2015-10-23 14:19:02 -07001444 if (!ret && htt_frag_desc) {
Yun Parkeaea8632017-04-09 09:53:45 -07001445 /*
1446 * Initialize the first 6 words (TSO flags)
1447 * of the frag descriptor
1448 */
Leo Chang376398b2015-10-23 14:19:02 -07001449 memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
1450 c_element->tx_desc.htt_frag_desc = htt_frag_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001451 c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001452 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001453#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
Leo Chang376398b2015-10-23 14:19:02 -07001454 c_element->tx_desc.pkt_type = 0xff;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001455#ifdef QCA_COMPUTE_TX_DELAY
Leo Chang376398b2015-10-23 14:19:02 -07001456 c_element->tx_desc.entry_timestamp_ticks =
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001457 0xffffffff;
1458#endif
1459#endif
Leo Chang376398b2015-10-23 14:19:02 -07001460 c_element->tx_desc.id = i;
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301461 qdf_atomic_init(&c_element->tx_desc.ref_cnt);
Leo Chang376398b2015-10-23 14:19:02 -07001462 c_element = c_element->next;
1463 fail_idx = i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001464 }
1465
1466 /* link SW tx descs into a freelist */
1467 pdev->tx_desc.num_free = desc_pool_size;
Poddar, Siddarth14521792017-03-14 21:19:42 +05301468 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001469 "%s first tx_desc:0x%pK Last tx desc:0x%pK\n", __func__,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001470 (uint32_t *) pdev->tx_desc.freelist,
1471 (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001472
1473 /* check what format of frames are expected to be delivered by the OS */
1474 pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
1475 if (pdev->frame_format == wlan_frm_fmt_native_wifi)
1476 pdev->htt_pkt_type = htt_pkt_type_native_wifi;
1477 else if (pdev->frame_format == wlan_frm_fmt_802_3) {
1478 if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
1479 pdev->htt_pkt_type = htt_pkt_type_eth2;
1480 else
1481 pdev->htt_pkt_type = htt_pkt_type_ethernet;
1482 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301483 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001484 "%s Invalid standard frame type: %d",
1485 __func__, pdev->frame_format);
Yun Parkf01f6e22017-01-18 17:27:02 -08001486 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001487 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001488 }
1489
1490 /* setup the global rx defrag waitlist */
1491 TAILQ_INIT(&pdev->rx.defrag.waitlist);
1492
1493 /* configure where defrag timeout and duplicate detection is handled */
1494 pdev->rx.flags.defrag_timeout_check =
1495 pdev->rx.flags.dup_check =
1496 ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);
1497
1498#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
1499	 /* Need to revisit this part. Currently, hardcoded to Riva's caps */
1500 pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
1501 pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
1502 /*
1503	 * The Riva HW de-aggregation doesn't have the capability to generate an
1504	 * 802.11 header for non-first subframes of an A-MSDU.
1505 */
1506 pdev->sw_subfrm_hdr_recovery_enable = 1;
1507 /*
1508	 * The Riva HW doesn't have the capability to set the Protected Frame
1509	 * bit in the MAC header for encrypted data frames.
1510 */
1511 pdev->sw_pf_proc_enable = 1;
1512
1513 if (pdev->frame_format == wlan_frm_fmt_802_3) {
Yun Parkeaea8632017-04-09 09:53:45 -07001514 /*
1515	 * SW LLC processing is only needed in the
1516	 * 802.3 to 802.11 transform case
1517 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001518 pdev->sw_tx_llc_proc_enable = 1;
1519 pdev->sw_rx_llc_proc_enable = 1;
1520 } else {
1521 pdev->sw_tx_llc_proc_enable = 0;
1522 pdev->sw_rx_llc_proc_enable = 0;
1523 }
1524
1525 switch (pdev->frame_format) {
1526 case wlan_frm_fmt_raw:
1527 pdev->sw_tx_encap =
1528 pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
1529 ? 0 : 1;
1530 pdev->sw_rx_decap =
1531 pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
1532 ? 0 : 1;
1533 break;
1534 case wlan_frm_fmt_native_wifi:
1535 pdev->sw_tx_encap =
1536 pdev->
1537 target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
1538 ? 0 : 1;
1539 pdev->sw_rx_decap =
1540 pdev->
1541 target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
1542 ? 0 : 1;
1543 break;
1544 case wlan_frm_fmt_802_3:
1545 pdev->sw_tx_encap =
1546 pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
1547 ? 0 : 1;
1548 pdev->sw_rx_decap =
1549 pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
1550 ? 0 : 1;
1551 break;
1552 default:
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301553 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001554 "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
1555 pdev->frame_format,
1556 pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
Yun Parkf01f6e22017-01-18 17:27:02 -08001557 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001558 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001559 }
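	/*
	 * Illustrative example derived from the code above (not in the
	 * original source): with the Riva capabilities hardcoded earlier
	 * (target_tx/rx_tran_caps == wlan_frm_tran_cap_raw), the
	 * wlan_frm_fmt_802_3 case sets sw_tx_encap = 1 and sw_rx_decap = 1,
	 * i.e. the 802.3 <-> 802.11 conversion is done by host software.
	 */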
1560#endif
1561
1562 /*
1563 * Determine what rx processing steps are done within the host.
1564 * Possibilities:
1565 * 1. Nothing - rx->tx forwarding and rx PN entirely within target.
1566 * (This is unlikely; even if the target is doing rx->tx forwarding,
1567 * the host should be doing rx->tx forwarding too, as a back up for
1568 * the target's rx->tx forwarding, in case the target runs short on
1569 * memory, and can't store rx->tx frames that are waiting for
1570 * missing prior rx frames to arrive.)
1571 * 2. Just rx -> tx forwarding.
1572 * This is the typical configuration for HL, and a likely
1573 * configuration for LL STA or small APs (e.g. retail APs).
1574 * 3. Both PN check and rx -> tx forwarding.
1575 * This is the typical configuration for large LL APs.
1576 * Host-side PN check without rx->tx forwarding is not a valid
1577 * configuration, since the PN check needs to be done prior to
1578 * the rx->tx forwarding.
1579 */
1580 if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
Yun Parkeaea8632017-04-09 09:53:45 -07001581 /*
1582		 * PN check, rx->tx forwarding and rx reorder are done by
1583 * the target
1584 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001585 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
1586 pdev->rx_opt_proc = ol_rx_in_order_deliver;
1587 else
1588 pdev->rx_opt_proc = ol_rx_fwd_check;
1589 } else {
1590 if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
1591 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
1592 /*
1593 * PN check done on host,
1594 * rx->tx forwarding not done at all.
1595 */
1596 pdev->rx_opt_proc = ol_rx_pn_check_only;
1597 } else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1598 /*
1599 * Both PN check and rx->tx forwarding done
1600 * on host.
1601 */
1602 pdev->rx_opt_proc = ol_rx_pn_check;
1603 } else {
1604#define TRACESTR01 "invalid config: if rx PN check is on the host, "\
1605"rx->tx forwarding check needs to also be on the host"
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301606 QDF_TRACE(QDF_MODULE_ID_TXRX,
1607 QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001608 "%s: %s", __func__, TRACESTR01);
1609#undef TRACESTR01
Yun Parkf01f6e22017-01-18 17:27:02 -08001610 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001611 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001612 }
1613 } else {
1614 /* PN check done on target */
1615 if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
1616 ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1617 /*
1618 * rx->tx forwarding done on host (possibly as
1619 * back-up for target-side primary rx->tx
1620 * forwarding)
1621 */
1622 pdev->rx_opt_proc = ol_rx_fwd_check;
1623 } else {
Yun Parkeaea8632017-04-09 09:53:45 -07001624 /*
1625 * rx->tx forwarding either done in target,
1626 * or not done at all
1627 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001628 pdev->rx_opt_proc = ol_rx_deliver;
1629 }
1630 }
1631 }
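	/*
	 * Summary of the rx_opt_proc selection above (derived from the code,
	 * added for clarity):
	 *   reorder offload + fwd disabled      -> ol_rx_in_order_deliver
	 *   reorder offload + fwd on host       -> ol_rx_fwd_check
	 *   host PN check   + fwd disabled      -> ol_rx_pn_check_only
	 *   host PN check   + fwd on host       -> ol_rx_pn_check
	 *   target PN check + fwd check on host -> ol_rx_fwd_check
	 *   target PN check + no host fwd       -> ol_rx_deliver
	 */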
1632
1633 /* initialize mutexes for tx desc alloc and peer lookup */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301634 qdf_spinlock_create(&pdev->tx_mutex);
1635 qdf_spinlock_create(&pdev->peer_ref_mutex);
1636 qdf_spinlock_create(&pdev->rx.mutex);
1637 qdf_spinlock_create(&pdev->last_real_peer_mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001638 qdf_spinlock_create(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001639 OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
1640
Yun Parkf01f6e22017-01-18 17:27:02 -08001641 if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK) {
1642 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001643 goto reorder_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001644 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001645
Yun Parkf01f6e22017-01-18 17:27:02 -08001646 if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK) {
1647 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001648 goto pn_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001649 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001650
1651#ifdef PERE_IP_HDR_ALIGNMENT_WAR
1652 pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
1653#endif
1654
1655 /*
1656 * WDI event attach
1657 */
1658 wdi_event_attach(pdev);
1659
1660 /*
1661 * Initialize rx PN check characteristics for different security types.
1662 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301663 qdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001664
1665 /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
1666 pdev->rx_pn[htt_sec_type_tkip].len =
1667 pdev->rx_pn[htt_sec_type_tkip_nomic].len =
1668 pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
1669 pdev->rx_pn[htt_sec_type_tkip].cmp =
1670 pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
1671 pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
1672
1673 /* WAPI: 128-bit PN */
1674 pdev->rx_pn[htt_sec_type_wapi].len = 128;
1675 pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
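	/*
	 * Illustrative note (assumption, not taken from the original source):
	 * the cmp callbacks above presumably implement the replay check,
	 * i.e. a received frame is accepted only if its packet number
	 * compares greater than the last PN seen for that peer/TID, using
	 * 48-bit arithmetic for TKIP/CCMP and 128-bit arithmetic for WAPI.
	 */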
1676
1677 OL_RX_REORDER_TIMEOUT_INIT(pdev);
1678
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001679 ol_txrx_dbg("Created pdev %pK\n", pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001680
1681 pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
1682
1683#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
1684#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
1685
1686/* #if 1 -- TODO: clean this up */
1687#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
1688 /* avg = 100% * new + 0% * old */ \
1689 (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
1690/*
Yun Parkeaea8632017-04-09 09:53:45 -07001691 * #else
1692 * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
1693 * //avg = 25% * new + 25% * old
1694 * (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
1695 * #endif
1696 */
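/*
 * Illustrative note (assumption, not taken from the original source): the
 * shift and weight above are presumed to feed an exponentially weighted
 * moving average of roughly the form
 *	avg = (new * weight + old * ((1 << shift) - weight)) >> shift
 * With the default weight of (1 << shift) this reduces to avg = new
 * (100% new, 0% old), matching the comment above; the commented-out
 * alternative weight of (1 << (shift - 2)) would instead blend 25% of the
 * new sample into the average.
 */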
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001697 pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
1698 pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
1699#endif
1700
1701 ol_txrx_local_peer_id_pool_init(pdev);
1702
1703 pdev->cfg.ll_pause_txq_limit =
1704 ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
1705
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301706 /* TX flow control for peer who is in very bad link status */
1707 ol_tx_badpeer_flow_cl_init(pdev);
1708
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001709#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301710 qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301711 qdf_spinlock_create(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001712
1713 /* initialize compute interval with 5 seconds (ESE default) */
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301714 pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001715 {
1716 uint32_t bin_width_1000ticks;
Yun Parkeaea8632017-04-09 09:53:45 -07001717
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001718 bin_width_1000ticks =
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301719 qdf_system_msecs_to_ticks
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001720 (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
1721 * 1000);
1722 /*
1723 * Compute a factor and shift that together are equal to the
1724 * inverse of the bin_width time, so that rather than dividing
1725 * by the bin width time, approximately the same result can be
1726 * obtained much more efficiently by a multiply + shift.
1727 * multiply_factor >> shift = 1 / bin_width_time, so
1728 * multiply_factor = (1 << shift) / bin_width_time.
1729 *
1730 * Pick the shift semi-arbitrarily.
1731 * If we knew statically what the bin_width would be, we could
1732 * choose a shift that minimizes the error.
1733 * Since the bin_width is determined dynamically, simply use a
1734 * shift that is about half of the uint32_t size. This should
1735 * result in a relatively large multiplier value, which
1736 * minimizes error from rounding the multiplier to an integer.
1737 * The rounding error only becomes significant if the tick units
1738 * are on the order of 1 microsecond. In most systems, it is
1739 * expected that the tick units will be relatively low-res,
1740 * on the order of 1 millisecond. In such systems the rounding
1741 * error is negligible.
1742 * It would be more accurate to dynamically try out different
1743 * shifts and choose the one that results in the smallest
1744 * rounding error, but that extra level of fidelity is
1745 * not needed.
1746 */
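		/*
		 * Worked example (illustrative only; the actual bin width and
		 * tick resolution are platform dependent): if 1 tick == 1 ms
		 * and the internal bin width is 10 ms, then
		 * bin_width_1000ticks = 10000 and, with shift = 16,
		 * multiply_factor = ((1 << 16) * 1000 + 5000) / 10000 = 6554.
		 * A delay of 25 ticks then maps to bin
		 * (25 * 6554) >> 16 = 2, i.e. approximately 25 / 10.
		 */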
1747 pdev->tx_delay.hist_internal_bin_width_shift = 16;
1748 pdev->tx_delay.hist_internal_bin_width_mult =
1749 ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
1750 1000 + (bin_width_1000ticks >> 1)) /
1751 bin_width_1000ticks;
1752 }
1753#endif /* QCA_COMPUTE_TX_DELAY */
1754
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001755 /* Thermal Mitigation */
1756 ol_tx_throttle_init(pdev);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001757
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001758 ol_tso_seg_list_init(pdev, desc_pool_size);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001759
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301760 ol_tso_num_seg_list_init(pdev, desc_pool_size);
1761
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001762 ol_tx_register_flow_control(pdev);
1763
1764 return 0; /* success */
1765
Leo Chang376398b2015-10-23 14:19:02 -07001766pn_trace_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001767 OL_RX_REORDER_TRACE_DETACH(pdev);
1768
Leo Chang376398b2015-10-23 14:19:02 -07001769reorder_trace_attach_fail:
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301770 qdf_spinlock_destroy(&pdev->tx_mutex);
1771 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1772 qdf_spinlock_destroy(&pdev->rx.mutex);
1773 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301774 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001775 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1776
Leo Chang376398b2015-10-23 14:19:02 -07001777control_init_fail:
1778desc_alloc_fail:
1779 for (i = 0; i < fail_idx; i++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001780 htt_tx_desc_free(pdev->htt_pdev,
Leo Chang376398b2015-10-23 14:19:02 -07001781 (ol_tx_desc_find(pdev, i))->htt_tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001782
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301783 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001784 &pdev->tx_desc.desc_pages, 0, true);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001785
Leo Chang376398b2015-10-23 14:19:02 -07001786page_alloc_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001787 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1788 htt_ipa_uc_detach(pdev->htt_pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001789uc_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001790 htt_detach(pdev->htt_pdev);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301791htt_attach_fail:
1792 ol_tx_desc_dup_detect_deinit(pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001793ol_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001794 return ret; /* fail */
1795}
1796
Dhanashri Atre12a08392016-02-17 13:10:34 -08001797/**
1798 * ol_txrx_pdev_attach_target() - send target configuration
1799 *
1800 * @pdev - the physical device being initialized
1801 *
1802 * The majority of the data SW setup are done by the pdev_attach
1803 * functions, but this function completes the data SW setup by
1804 * sending datapath configuration messages to the target.
1805 *
1806 * Return: 0 - success 1 - failure
1807 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001808static A_STATUS ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001809{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001810 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +05301811	 return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0 : 1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001812}
1813
Dhanashri Atre12a08392016-02-17 13:10:34 -08001814/**
Mohit Khanna54f3a382017-03-13 17:56:32 -07001815 * ol_tx_free_descs_inuse - free tx descriptors which are in use
1816 * @pdev - the physical device for which tx descs need to be freed
1817 *
1818 * Cycle through the list of TX descriptors (for a pdev) which are in use,
1819 * for which TX completion has not been received and free them. Should be
1820 * called only when the interrupts are off and all lower layer RX is stopped.
1821 * Otherwise there may be a race condition with TX completions.
1822 *
1823 * Return: None
1824 */
1825static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
1826{
1827 int i;
1828 void *htt_tx_desc;
1829 struct ol_tx_desc_t *tx_desc;
1830 int num_freed_tx_desc = 0;
1831
1832 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1833 tx_desc = ol_tx_desc_find(pdev, i);
1834 /*
1835 * Confirm that each tx descriptor is "empty", i.e. it has
1836 * no tx frame attached.
1837 * In particular, check that there are no frames that have
1838 * been given to the target to transmit, for which the
1839 * target has never provided a response.
1840 */
1841 if (qdf_atomic_read(&tx_desc->ref_cnt)) {
1842 ol_txrx_dbg("Warning: freeing tx frame (no compltn)");
1843 ol_tx_desc_frame_free_nonstd(pdev,
1844 tx_desc, 1);
1845 num_freed_tx_desc++;
1846 }
1847 htt_tx_desc = tx_desc->htt_tx_desc;
1848 htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
1849 }
1850
1851 if (num_freed_tx_desc)
1852 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1853 "freed %d tx frames for which no resp from target",
1854 num_freed_tx_desc);
1855
1856}
1857
1858/**
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301859 * ol_txrx_pdev_pre_detach() - detach the data SW state
Dhanashri Atre12a08392016-02-17 13:10:34 -08001860 * @pdev - the data physical device object being removed
1861 * @force - delete the pdev (and its vdevs and peers) even if
1862 * there are outstanding references by the target to the vdevs
1863 * and peers within the pdev
1864 *
1865 * This function is used when the WLAN driver is being removed to
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301866 * detach the host data component within the driver.
Dhanashri Atre12a08392016-02-17 13:10:34 -08001867 *
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301868 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08001869 */
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301870static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001871{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001872 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001873
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001874 /* preconditions */
1875 TXRX_ASSERT2(pdev);
1876
1877 /* check that the pdev has no vdevs allocated */
1878 TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
1879
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001880#ifdef QCA_SUPPORT_TX_THROTTLE
1881 /* Thermal Mitigation */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301882 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
1883 qdf_timer_free(&pdev->tx_throttle.phase_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001884#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301885 qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1886 qdf_timer_free(&pdev->tx_throttle.tx_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001887#endif
1888#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001889
1890 if (force) {
1891 /*
1892 * The assertion above confirms that all vdevs within this pdev
1893 * were detached. However, they may not have actually been
1894 * deleted.
1895 * If the vdev had peers which never received a PEER_UNMAP msg
1896 * from the target, then there are still zombie peer objects,
1897 * and the vdev parents of the zombie peers are also zombies,
1898 * hanging around until their final peer gets deleted.
1899 * Go through the peer hash table and delete any peers left.
1900 * As a side effect, this will complete the deletion of any
1901 * vdevs that are waiting for their peers to finish deletion.
1902 */
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07001903 ol_txrx_dbg("Force delete for pdev %pK\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001904 pdev);
1905 ol_txrx_peer_find_hash_erase(pdev);
1906 }
1907
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301908 /* to get flow pool status before freeing descs */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07001909 ol_tx_dump_flow_pool_info((void *)pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07001910 ol_tx_free_descs_inuse(pdev);
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301911 ol_tx_deregister_flow_control(pdev);
Mohit Khanna54f3a382017-03-13 17:56:32 -07001912
1913	 /*
1914	 * ol_tso_seg_list_deinit() must happen after
1915	 * ol_tx_free_descs_inuse(), since the latter accesses the TSO seg
1916	 * freelist that ol_tso_seg_list_deinit() de-initializes.
1917	 */
1918 ol_tso_seg_list_deinit(pdev);
1919 ol_tso_num_seg_list_deinit(pdev);
1920
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301921 /* Stop the communication between HTT and target at first */
1922 htt_detach_target(pdev->htt_pdev);
1923
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301924 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001925 &pdev->tx_desc.desc_pages, 0, true);
1926 pdev->tx_desc.freelist = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001927
1928 /* Detach micro controller data path offload resource */
1929 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1930 htt_ipa_uc_detach(pdev->htt_pdev);
1931
1932 htt_detach(pdev->htt_pdev);
Nirav Shah76291962016-04-25 10:50:37 +05301933 ol_tx_desc_dup_detect_deinit(pdev);
1934
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301935 qdf_spinlock_destroy(&pdev->tx_mutex);
1936 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1937 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
1938 qdf_spinlock_destroy(&pdev->rx.mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001939 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001940#ifdef QCA_SUPPORT_TX_THROTTLE
1941 /* Thermal Mitigation */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301942 qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001943#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301944
1945 /* TX flow control for peer who is in very bad link status */
1946 ol_tx_badpeer_flow_cl_deinit(pdev);
1947
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001948 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1949
1950 OL_RX_REORDER_TRACE_DETACH(pdev);
1951 OL_RX_PN_TRACE_DETACH(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301952
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001953 /*
1954 * WDI event detach
1955 */
1956 wdi_event_detach(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301957
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001958 ol_txrx_local_peer_id_cleanup(pdev);
1959
1960#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301961 qdf_spinlock_destroy(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001962#endif
Houston Hoffmane5ec0492017-01-30 12:28:32 -08001963 qdf_mem_free(ppdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001964}
1965
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301966/**
1967 * ol_txrx_pdev_detach() - delete the data SW state
1968 * @ppdev - the data physical device object being removed
1969 * @force - delete the pdev (and its vdevs and peers) even if
1970 * there are outstanding references by the target to the vdevs
1971 * and peers within the pdev
1972 *
1973 * This function is used when the WLAN driver is being removed to
1974 * remove the host data component within the driver.
1975 * All virtual devices within the physical device need to be deleted
1976 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
1977 *
1978 * Return: None
1979 */
1980static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
1981{
1982 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
tfyu9fcabd72017-09-26 17:46:48 +08001983 struct ol_txrx_stats_req_internal *req;
1984 int i = 0;
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301985
1986	 /* check that the txrx pdev structure is not NULL */
1987 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05301988 ol_txrx_err(
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301989 "NULL pdev passed to %s\n", __func__);
1990 return;
1991 }
1992
1993 htt_pktlogmod_exit(pdev);
1994
tfyu9fcabd72017-09-26 17:46:48 +08001995 qdf_spin_lock_bh(&pdev->req_list_spinlock);
1996 if (pdev->req_list_depth > 0)
1997 ol_txrx_err(
1998 "Warning: the txrx req list is not empty, depth=%d\n",
1999 pdev->req_list_depth
2000 );
2001 TAILQ_FOREACH(req, &pdev->req_list, req_list_elem) {
2002 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
2003 pdev->req_list_depth--;
2004 ol_txrx_err(
2005 "%d: %p,verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)\n",
2006 i++,
2007 req,
2008 req->base.print.verbose,
2009 req->base.print.concise,
2010 req->base.stats_type_upload_mask,
2011 req->base.stats_type_reset_mask
2012 );
2013 qdf_mem_free(req);
2014 }
2015 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
2016
2017 qdf_spinlock_destroy(&pdev->req_list_spinlock);
2018
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05302019 OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
2020
2021 if (pdev->cfg.is_high_latency)
2022 ol_tx_sched_detach(pdev);
2023
2024 htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
2025
2026 htt_pdev_free(pdev->htt_pdev);
2027 ol_txrx_peer_find_detach(pdev);
2028 ol_txrx_tso_stats_deinit(pdev);
2029
2030 ol_txrx_pdev_txq_log_destroy(pdev);
2031 ol_txrx_pdev_grp_stat_destroy(pdev);
2032}
2033
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302034#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
2035
2036/**
2037 * ol_txrx_vdev_tx_desc_cnt_init() - initialise tx descriptor count for vdev
2038 * @vdev: the virtual device object
2039 *
2040 * Return: None
2041 */
2042static inline void
2043ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
2044{
2045 qdf_atomic_init(&vdev->tx_desc_count);
2046}
2047#else
2048
2049static inline void
2050ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
2051{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302052}
2053#endif
2054
Dhanashri Atre12a08392016-02-17 13:10:34 -08002055/**
2056 * ol_txrx_vdev_attach - Allocate and initialize the data object
2057 * for a new virtual device.
2058 *
2059 * @data_pdev - the physical device the virtual device belongs to
2060 * @vdev_mac_addr - the MAC address of the virtual device
2061 * @vdev_id - the ID used to identify the virtual device to the target
2062 * @op_mode - whether this virtual device is operating as an AP,
2063 * an IBSS, or a STA
2064 *
2065 * Return: success: handle to new data vdev object, failure: NULL
2066 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002067static struct cdp_vdev *
2068ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002069 uint8_t *vdev_mac_addr,
2070 uint8_t vdev_id, enum wlan_op_mode op_mode)
2071{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002072 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002073 struct ol_txrx_vdev_t *vdev;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002074 QDF_STATUS qdf_status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002075
2076 /* preconditions */
2077 TXRX_ASSERT2(pdev);
2078 TXRX_ASSERT2(vdev_mac_addr);
2079
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302080 vdev = qdf_mem_malloc(sizeof(*vdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002081 if (!vdev)
2082 return NULL; /* failure */
2083
2084 /* store provided params */
2085 vdev->pdev = pdev;
2086 vdev->vdev_id = vdev_id;
2087 vdev->opmode = op_mode;
2088
2089 vdev->delete.pending = 0;
2090 vdev->safemode = 0;
2091 vdev->drop_unenc = 1;
2092 vdev->num_filters = 0;
Himanshu Agarwal5ac2f7b2016-05-06 20:08:10 +05302093 vdev->fwd_tx_packets = 0;
2094 vdev->fwd_rx_packets = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002095
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302096 ol_txrx_vdev_tx_desc_cnt_init(vdev);
2097
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302098 qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002099 OL_TXRX_MAC_ADDR_LEN);
2100
2101 TAILQ_INIT(&vdev->peer_list);
2102 vdev->last_real_peer = NULL;
2103
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002104 ol_txrx_hl_tdls_flag_reset((struct cdp_vdev *)vdev, false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302105
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002106#ifdef QCA_IBSS_SUPPORT
2107 vdev->ibss_peer_num = 0;
2108 vdev->ibss_peer_heart_beat_timer = 0;
2109#endif
2110
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302111 ol_txrx_vdev_txqs_init(vdev);
2112
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302113 qdf_spinlock_create(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002114 vdev->ll_pause.paused_reason = 0;
2115 vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
2116 vdev->ll_pause.txq.depth = 0;
wadesong5e2e8012017-08-21 16:56:03 +08002117 qdf_atomic_init(&vdev->delete.detaching);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302118 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002119 &vdev->ll_pause.timer,
2120 ol_tx_vdev_ll_pause_queue_send, vdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302121 QDF_TIMER_TYPE_SW);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302122 qdf_atomic_init(&vdev->os_q_paused);
2123 qdf_atomic_set(&vdev->os_q_paused, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002124 vdev->tx_fl_lwm = 0;
2125 vdev->tx_fl_hwm = 0;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002126 vdev->rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002127 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Abhishek Singh217d9782017-04-28 23:49:11 +05302128 qdf_mem_zero(&vdev->last_peer_mac_addr,
2129 sizeof(union ol_txrx_align_mac_addr_t));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302130 qdf_spinlock_create(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002131 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08002132 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002133 vdev->osif_fc_ctx = NULL;
2134
2135 /* Default MAX Q depth for every VDEV */
2136 vdev->ll_pause.max_q_depth =
2137 ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002138 qdf_status = qdf_event_create(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002139 /* add this vdev into the pdev's list */
2140 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
2141
Poddar, Siddarth14521792017-03-14 21:19:42 +05302142 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002143 "Created vdev %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002144 vdev,
2145 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2146 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2147 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2148
2149 /*
2150 * We've verified that htt_op_mode == wlan_op_mode,
2151 * so no translation is needed.
2152 */
2153 htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
2154
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002155 return (struct cdp_vdev *)vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002156}
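/*
 * Illustrative usage sketch (not from the original source; ppdev, the vdev
 * id, the MAC address and the op_mode value below are assumptions, and in
 * the real driver this function is presumably reached through the cdp ops
 * table rather than called directly):
 *
 *	uint8_t mac[OL_TXRX_MAC_ADDR_LEN] = { 0x00, 0x0a, 0xf5, 0x01, 0x02, 0x03 };
 *	struct cdp_vdev *vdev;
 *
 *	vdev = ol_txrx_vdev_attach(ppdev, mac, 0, wlan_op_mode_sta);
 *	if (!vdev)
 *		... handle allocation failure ...
 *	... later, register the OS shim hooks with ol_txrx_vdev_register() ...
 */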
2157
Dhanashri Atre12a08392016-02-17 13:10:34 -08002158/**
2159 * ol_txrx_vdev_register - Link a vdev's data object with the
2160 * matching OS shim vdev object.
2161 *
2162 * @txrx_vdev: the virtual device's data object
2163 * @osif_vdev: the virtual device's OS shim object
2164 * @txrx_ops: (pointers to)functions used for tx and rx data xfer
2165 *
2166 * The data object for a virtual device is created by the
2167 * function ol_txrx_vdev_attach. However, rather than fully
2168 * linking the data vdev object with the vdev objects from the
2169 * other subsystems that the data vdev object interacts with,
2170 * the txrx_vdev_attach function focuses primarily on creating
2171 * the data vdev object. After the creation of both the data
2172 * vdev object and the OS shim vdev object, this
2173 * txrx_osif_vdev_attach function is used to connect the two
2174 * vdev objects, so the data SW can use the OS shim vdev handle
2175 * when passing rx data received by a vdev up to the OS shim.
2176 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002177static void ol_txrx_vdev_register(struct cdp_vdev *pvdev,
2178 void *osif_vdev,
2179 struct ol_txrx_ops *txrx_ops)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002180{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002181 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002182 if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
2183 qdf_print("%s: vdev/txrx_ops is NULL!\n", __func__);
2184 qdf_assert(0);
2185 return;
2186 }
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002187
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002188 vdev->osif_dev = osif_vdev;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002189 vdev->rx = txrx_ops->rx.rx;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002190 txrx_ops->tx.tx = ol_tx_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002191}
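/*
 * Illustrative usage sketch (not from the original source; my_shim_rx_cb
 * and my_osif_dev are hypothetical): the OS shim supplies its rx handler
 * and receives the tx entry point back through the same ops struct,
 * mirroring the assignments in the function above:
 *
 *	struct ol_txrx_ops ops = { 0 };
 *
 *	ops.rx.rx = my_shim_rx_cb;
 *	ol_txrx_vdev_register(vdev, my_osif_dev, &ops);
 *	... ops.tx.tx now points at ol_tx_data and is used to transmit ...
 */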
2192
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002193#ifdef currently_unused
Dhanashri Atre12a08392016-02-17 13:10:34 -08002194/**
2195 * ol_txrx_set_curchan - Setup the current operating channel of
2196 * the device
2197 * @pdev - the data physical device object
2198 * @chan_mhz - the channel frequency (mhz) packets on
2199 *
2200 * Mainly used when populating monitor mode status that requires
2201 * the current operating channel
2202 *
2203 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002204void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz)
2205{
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002206}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002207#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002208
2209void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
2210{
2211 vdev->safemode = val;
2212}
2213
Dhanashri Atre12a08392016-02-17 13:10:34 -08002214/**
2215 * ol_txrx_set_privacy_filters - set the privacy filter
2216 * @vdev - the data virtual device object
2217 * @filter - filters to be set
2218 * @num - the number of filters
2219 *
2220 * Rx related. Set the privacy filters. When rx packets, check
2221 * the ether type, filter type and packet type to decide whether
2222 * discard these packets.
2223 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002224static void
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002225ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
2226 void *filters, uint32_t num)
2227{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302228 qdf_mem_copy(vdev->privacy_filters, filters,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002229 num * sizeof(struct privacy_exemption));
2230 vdev->num_filters = num;
2231}
2232
2233void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
2234{
2235 vdev->drop_unenc = val;
2236}
2237
gbian016a42e2017-03-01 18:49:11 +08002238#if defined(CONFIG_HL_SUPPORT)
2239
2240static void
2241ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2242{
2243 struct ol_txrx_pdev_t *pdev = vdev->pdev;
2244 int i;
2245 struct ol_tx_desc_t *tx_desc;
2246
2247 qdf_spin_lock_bh(&pdev->tx_mutex);
2248 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2249 tx_desc = ol_tx_desc_find(pdev, i);
2250 if (tx_desc->vdev == vdev)
2251 tx_desc->vdev = NULL;
2252 }
2253 qdf_spin_unlock_bh(&pdev->tx_mutex);
2254}
2255
2256#else
2257
2258static void
2259ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2260{
2261
2262}
2263
2264#endif
2265
Dhanashri Atre12a08392016-02-17 13:10:34 -08002266/**
2267 * ol_txrx_vdev_detach - Deallocate the specified data virtual
2268 * device object.
2269 * @data_vdev: data object for the virtual device in question
2270 * @callback: function to call (if non-NULL) once the vdev has
2271 * been wholly deleted
2272 * @callback_context: context to provide in the callback
2273 *
2274 * All peers associated with the virtual device need to be deleted
2275 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
2276 * However, for the peers to be fully deleted, the peer deletion has to
2277 * percolate through the target data FW and back up to the host data SW.
2278 * Thus, even though the host control SW may have issued a peer_detach
2279 * call for each of the vdev's peers, the peer objects may still be
2280 * allocated, pending removal of all references to them by the target FW.
2281 * In this case, though the vdev_detach function call will still return
2282 * immediately, the vdev itself won't actually be deleted, until the
2283 * deletions of all its peers complete.
2284 * The caller can provide a callback function pointer to be notified when
2285 * the vdev deletion actually happens - whether it's directly within the
2286 * vdev_detach call, or if it's deferred until all in-progress peer
2287 * deletions have completed.
2288 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002289static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002290ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002291 ol_txrx_vdev_delete_cb callback, void *context)
2292{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002293 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
wadesong5e2e8012017-08-21 16:56:03 +08002294 struct ol_txrx_pdev_t *pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002295
2296 /* preconditions */
2297 TXRX_ASSERT2(vdev);
wadesong5e2e8012017-08-21 16:56:03 +08002298 pdev = vdev->pdev;
2299
2300 /* prevent anyone from restarting the ll_pause timer again */
2301 qdf_atomic_set(&vdev->delete.detaching, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002302
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302303 ol_txrx_vdev_tx_queue_free(vdev);
2304
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302305 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302306 qdf_timer_stop(&vdev->ll_pause.timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002307 vdev->ll_pause.is_q_timer_on = false;
2308 while (vdev->ll_pause.txq.head) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302309 qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
Yun Parkeaea8632017-04-09 09:53:45 -07002310
Nirav Shahcbc6d722016-03-01 16:24:53 +05302311 qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
2312 qdf_nbuf_unmap(pdev->osdev, vdev->ll_pause.txq.head,
Anurag Chouhandf2b2682016-02-29 14:15:27 +05302313 QDF_DMA_TO_DEVICE);
Nirav Shahcbc6d722016-03-01 16:24:53 +05302314 qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002315 vdev->ll_pause.txq.head = next;
2316 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302317 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
wadesong5e2e8012017-08-21 16:56:03 +08002318
2319 /* ll_pause timer should be deleted without any locks held, and
2320 * no timer function should be executed after this point because
2321 * qdf_timer_free is deleting the timer synchronously.
2322 */
2323 qdf_timer_free(&vdev->ll_pause.timer);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302324 qdf_spinlock_destroy(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002325
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302326 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002327 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08002328 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002329 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302330 qdf_spin_unlock_bh(&vdev->flow_control_lock);
2331 qdf_spinlock_destroy(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002332
2333 /* remove the vdev from its parent pdev's list */
2334 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
2335
2336 /*
2337 * Use peer_ref_mutex while accessing peer_list, in case
2338 * a peer is in the process of being removed from the list.
2339 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302340 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002341 /* check that the vdev has no peers allocated */
2342 if (!TAILQ_EMPTY(&vdev->peer_list)) {
2343 /* debug print - will be removed later */
Poddar, Siddarth14521792017-03-14 21:19:42 +05302344 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002345 "%s: not deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002346 __func__, vdev,
2347 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2348 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2349 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2350 /* indicate that the vdev needs to be deleted */
2351 vdev->delete.pending = 1;
2352 vdev->delete.callback = callback;
2353 vdev->delete.context = context;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302354 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002355 return;
2356 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302357 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002358 qdf_event_destroy(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002359
Poddar, Siddarth14521792017-03-14 21:19:42 +05302360 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002361 "%s: deleting vdev obj %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002362 __func__, vdev,
2363 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2364 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2365 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2366
2367 htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
2368
2369 /*
Yun Parkeaea8632017-04-09 09:53:45 -07002370	 * ol_tx_desc_free() might access invalid vdev contents referenced by a
2371	 * tx desc, since this vdev might be detached asynchronously in
2372	 * another thread.
2373 *
2374 * Go through tx desc pool to set corresponding tx desc's vdev to NULL
2375 * when detach this vdev, and add vdev checking in the ol_tx_desc_free
2376 * to avoid crash.
2377 *
2378 */
gbian016a42e2017-03-01 18:49:11 +08002379 ol_txrx_tx_desc_reset_vdev(vdev);
2380
2381 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002382 * Doesn't matter if there are outstanding tx frames -
2383 * they will be freed once the target sends a tx completion
2384 * message for them.
2385 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302386 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002387 if (callback)
2388 callback(context);
2389}
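/*
 * Illustrative usage sketch (not from the original source;
 * my_vdev_deleted_cb and my_completion are hypothetical): a caller that
 * must know when the vdev is really gone supplies a callback, since the
 * deletion may be deferred until the target has released all of the
 * vdev's peers:
 *
 *	static void my_vdev_deleted_cb(void *context)
 *	{
 *		complete((struct completion *)context);
 *	}
 *
 *	ol_txrx_vdev_detach((struct cdp_vdev *)vdev, my_vdev_deleted_cb,
 *			    &my_completion);
 */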
2390
2391/**
2392 * ol_txrx_flush_rx_frames() - flush cached rx frames
2393 * @peer: peer
2394 * @drop: set flag to drop frames
2395 *
2396 * Return: None
2397 */
2398void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302399 bool drop)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002400{
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002401 struct ol_txrx_cached_bufq_t *bufqi;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002402 struct ol_rx_cached_buf *cache_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302403 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002404 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002405
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302406 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
2407 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002408 return;
2409 }
2410
Dhanashri Atre182b0272016-02-17 15:35:07 -08002411 qdf_assert(peer->vdev);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302412 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002413 bufqi = &peer->bufq_info;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002414
Dhanashri Atre50141c52016-04-07 13:15:29 -07002415 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
Dhanashri Atre182b0272016-02-17 15:35:07 -08002416 data_rx = peer->vdev->rx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002417 else
2418 drop = true;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302419 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002420
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002421 qdf_spin_lock_bh(&bufqi->bufq_lock);
2422 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002423 typeof(*cache_buf), list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002424 while (!list_empty(&bufqi->cached_bufq)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002425 list_del(&cache_buf->list);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002426 bufqi->curr--;
2427 qdf_assert(bufqi->curr >= 0);
2428 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002429 if (drop) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302430 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002431 } else {
2432 /* Flush the cached frames to HDD */
Dhanashri Atre182b0272016-02-17 15:35:07 -08002433 ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302434 if (ret != QDF_STATUS_SUCCESS)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302435 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002436 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302437 qdf_mem_free(cache_buf);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002438 qdf_spin_lock_bh(&bufqi->bufq_lock);
2439 cache_buf = list_entry((&bufqi->cached_bufq)->next,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002440 typeof(*cache_buf), list);
2441 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002442 bufqi->qdepth_no_thresh = bufqi->curr;
2443 qdf_spin_unlock_bh(&bufqi->bufq_lock);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302444 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002445}
2446
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07002447static void ol_txrx_flush_cache_rx_queue(void)
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302448{
2449 uint8_t sta_id;
2450 struct ol_txrx_peer_t *peer;
2451 struct ol_txrx_pdev_t *pdev;
2452
2453 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
2454 if (!pdev)
2455 return;
2456
2457 for (sta_id = 0; sta_id < WLAN_MAX_STA_COUNT; sta_id++) {
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002458 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
2459 sta_id);
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302460 if (!peer)
2461 continue;
2462 ol_txrx_flush_rx_frames(peer, 1);
2463 }
2464}
2465
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302466/* Define short name to use in cds_trigger_recovery */
2467#define PEER_DEL_TIMEOUT QDF_PEER_DELETION_TIMEDOUT
2468
Dhanashri Atre12a08392016-02-17 13:10:34 -08002469/**
2470 * ol_txrx_peer_attach - Allocate and set up references for a
2471 * data peer object.
2472 * @data_pdev: data physical device object that will indirectly
2473 * own the data_peer object
2474 * @data_vdev - data virtual device object that will directly
2475 * own the data_peer object
2476 * @peer_mac_addr - MAC address of the new peer
2477 *
2478 * When an association with a peer starts, the host's control SW
2479 * uses this function to inform the host data SW.
2480 * The host data SW allocates its own peer object, and stores a
2481 * reference to the control peer object within the data peer object.
2482 * The host data SW also stores a reference to the virtual device
2483 * that the peer is associated with. This virtual device handle is
2484 * used when the data SW delivers rx data frames to the OS shim layer.
2485 * The host data SW returns a handle to the new peer data object,
2486 * so a reference within the control peer object can be set to the
2487 * data peer object.
2488 *
2489 * Return: handle to new data peer object, or NULL if the attach
2490 * fails
2491 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002492static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002493ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002494{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002495 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002496 struct ol_txrx_peer_t *peer;
2497 struct ol_txrx_peer_t *temp_peer;
2498 uint8_t i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002499 bool wait_on_deletion = false;
2500 unsigned long rc;
Dhanashri Atre12a08392016-02-17 13:10:34 -08002501 struct ol_txrx_pdev_t *pdev;
Abhishek Singh217d9782017-04-28 23:49:11 +05302502 bool cmp_wait_mac = false;
2503 uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002504
2505 /* preconditions */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002506 TXRX_ASSERT2(vdev);
2507 TXRX_ASSERT2(peer_mac_addr);
2508
Dhanashri Atre12a08392016-02-17 13:10:34 -08002509 pdev = vdev->pdev;
2510 TXRX_ASSERT2(pdev);
2511
Abhishek Singh217d9782017-04-28 23:49:11 +05302512 if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
2513 QDF_MAC_ADDR_SIZE))
2514 cmp_wait_mac = true;
2515
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302516 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002517	 /* check for a duplicate existing peer */
2518 TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2519 if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
2520 (union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302521 ol_txrx_info_high(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002522	 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exists.\n",
2523 vdev->vdev_id,
2524 peer_mac_addr[0], peer_mac_addr[1],
2525 peer_mac_addr[2], peer_mac_addr[3],
2526 peer_mac_addr[4], peer_mac_addr[5]);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302527 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002528 vdev->wait_on_peer_id = temp_peer->local_id;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002529 qdf_event_reset(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002530 wait_on_deletion = true;
Abhishek Singh217d9782017-04-28 23:49:11 +05302531 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002532 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302533 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002534 return NULL;
2535 }
2536 }
Abhishek Singh217d9782017-04-28 23:49:11 +05302537 if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
2538 &temp_peer->mac_addr,
2539 &vdev->last_peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302540 ol_txrx_info_high(
Abhishek Singh217d9782017-04-28 23:49:11 +05302541	 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) old peer exists.\n",
2542 vdev->vdev_id,
2543 vdev->last_peer_mac_addr.raw[0],
2544 vdev->last_peer_mac_addr.raw[1],
2545 vdev->last_peer_mac_addr.raw[2],
2546 vdev->last_peer_mac_addr.raw[3],
2547 vdev->last_peer_mac_addr.raw[4],
2548 vdev->last_peer_mac_addr.raw[5]);
2549 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2550 vdev->wait_on_peer_id = temp_peer->local_id;
2551 qdf_event_reset(&vdev->wait_delete_comp);
2552 wait_on_deletion = true;
2553 break;
2554 } else {
2555 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2556 ol_txrx_err("peer not found");
2557 return NULL;
2558 }
2559 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002560 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302561 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002562
Abhishek Singh217d9782017-04-28 23:49:11 +05302563 qdf_mem_zero(&vdev->last_peer_mac_addr,
2564 sizeof(union ol_txrx_align_mac_addr_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002565 if (wait_on_deletion) {
2566 /* wait for peer deletion */
Anurag Chouhance0dc992016-02-16 18:18:03 +05302567 rc = qdf_wait_single_event(&vdev->wait_delete_comp,
Prakash Manjunathappad3ccca22016-05-05 19:23:19 -07002568 PEER_DELETION_TIMEOUT);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002569 if (QDF_STATUS_SUCCESS != rc) {
Dustin Brown100201e2017-07-10 11:48:40 -07002570 ol_txrx_err("error waiting for peer(%d) deletion, status %d\n",
2571 vdev->wait_on_peer_id, (int) rc);
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302572 cds_trigger_recovery(PEER_DEL_TIMEOUT);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002573 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Dustin Brown100201e2017-07-10 11:48:40 -07002574
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002575 return NULL;
2576 }
2577 }
2578
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302579 peer = qdf_mem_malloc(sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002580 if (!peer)
2581 return NULL; /* failure */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002582
2583 /* store provided params */
2584 peer->vdev = vdev;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302585 qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002586 OL_TXRX_MAC_ADDR_LEN);
2587
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302588 ol_txrx_peer_txqs_init(pdev, peer);
2589
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002590 INIT_LIST_HEAD(&peer->bufq_info.cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302591 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002592 /* add this peer into the vdev's list */
2593 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302594 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002595 /* check whether this is a real peer (peer mac addr != vdev mac addr) */
2596 if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr))
2597 vdev->last_real_peer = peer;
2598
2599 peer->rx_opt_proc = pdev->rx_opt_proc;
2600
2601 ol_rx_peer_init(pdev, peer);
2602
2603 /* initialize the peer_id */
2604 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2605 peer->peer_ids[i] = HTT_INVALID_PEER;
2606
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302607 qdf_spinlock_create(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07002608 qdf_spinlock_create(&peer->bufq_info.bufq_lock);
2609
2610 peer->bufq_info.thresh = OL_TXRX_CACHED_BUFQ_THRESH;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002611
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302612 qdf_atomic_init(&peer->delete_in_progress);
2613 qdf_atomic_init(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002614
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302615 qdf_atomic_init(&peer->ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002616
2617 /* keep one reference for attach */
Mohit Khannab04dfcd2017-02-13 18:54:35 -08002618 OL_TXRX_PEER_INC_REF_CNT(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002619
Prakash Dhavali0d3f1d62016-11-20 23:48:24 -08002620 /*
2621 * Set a flag to indicate peer create is pending in firmware and
2622 * increment ref_cnt so that peer will not get deleted while
2623 * peer create command is pending in firmware.
2624 * First peer_map event from firmware signifies successful
2625 * peer creation and it will be decremented in peer_map handling.
2626 */
2627 qdf_atomic_init(&peer->fw_create_pending);
2628 qdf_atomic_set(&peer->fw_create_pending, 1);
Mohit Khannab04dfcd2017-02-13 18:54:35 -08002629 OL_TXRX_PEER_INC_REF_CNT(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002630
2631 peer->valid = 1;
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07002632 qdf_timer_init(pdev->osdev, &peer->peer_unmap_timer,
2633 peer_unmap_timer_handler, peer, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002634
2635 ol_txrx_peer_find_hash_add(pdev, peer);
2636
Mohit Khanna47384bc2016-08-15 15:37:05 -07002637 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07002638 "vdev %pK created peer %pK ref_cnt %d (%02x:%02x:%02x:%02x:%02x:%02x)\n",
Mohit Khanna47384bc2016-08-15 15:37:05 -07002639 vdev, peer, qdf_atomic_read(&peer->ref_cnt),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002640 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2641 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2642 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2643 /*
 2644	 * For every peer map message, search and set bss_peer
2645 */
Ankit Guptaa5076012016-09-14 11:32:19 -07002646 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
2647 OL_TXRX_MAC_ADDR_LEN))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002648 peer->bss_peer = 1;
2649
2650 /*
2651 * The peer starts in the "disc" state while association is in progress.
2652 * Once association completes, the peer will get updated to "auth" state
2653 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2654 * or else to the "conn" state. For non-open mode, the peer will
2655 * progress to "auth" state once the authentication completes.
2656 */
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002657 peer->state = OL_TXRX_PEER_STATE_INVALID;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002658 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002659 OL_TXRX_PEER_STATE_DISC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002660
2661#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2662 peer->rssi_dbm = HTT_RSSI_INVALID;
2663#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002664 if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2665 !pdev->self_peer) {
2666 pdev->self_peer = peer;
2667 /*
2668 * No Tx in monitor mode, otherwise results in target assert.
2669 * Setting disable_intrabss_fwd to true
2670 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002671 ol_vdev_rx_set_intrabss_fwd((struct cdp_vdev *)vdev, true);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002672 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002673
2674 ol_txrx_local_peer_id_alloc(pdev, peer);
2675
Leo Chang98726762016-10-28 11:07:18 -07002676 return (void *)peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002677}
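/*
 * Illustrative usage sketch (not compiled into the driver): the caller
 * is assumed to hold a valid cdp_vdev handle and the station MAC
 * address; "vdev_handle" and "sta_mac" are hypothetical local names.
 * A NULL return covers the duplicate-peer, deletion-wait-timeout and
 * allocation-failure cases handled above.
 *
 *	void *peer = ol_txrx_peer_attach(vdev_handle, sta_mac);
 *
 *	if (!peer)
 *		return QDF_STATUS_E_FAILURE;
 */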
2678
Anurag Chouhan4085ff72017-10-05 18:09:56 +05302679#undef PEER_DEL_TIMEOUT
2680
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002681/*
2682 * Discarding tx filter - removes all data frames (disconnected state)
2683 */
2684static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2685{
2686 return A_ERROR;
2687}
2688
2689/*
 2690 * Non-authentication tx filter - filters out data frames that are not
2691 * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2692 * data frames (connected state)
2693 */
2694static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2695{
2696 return
2697 (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2698 tx_msdu_info->htt.info.ethertype ==
2699 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2700}
2701
2702/*
2703 * Pass-through tx filter - lets all data frames through (authenticated state)
2704 */
2705static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2706{
2707 return A_OK;
2708}
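/*
 * The three filters above are installed per peer according to its
 * state; a simplified sketch of the mapping that
 * ol_txrx_peer_state_update() applies further below:
 *
 *	OL_TXRX_PEER_STATE_AUTH  ->  ol_tx_filter_pass_thru
 *	OL_TXRX_PEER_STATE_CONN  ->  ol_tx_filter_non_auth (EAPOL/WAPI only)
 *	any other state          ->  ol_tx_filter_discard
 */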
2709
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002710/**
2711 * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2712 * @peer: handle to peer
2713 *
 2714 * Returns the MAC address for modules which do not know the peer type
2715 *
2716 * Return: the mac_addr from peer
2717 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002718static uint8_t *
Leo Chang98726762016-10-28 11:07:18 -07002719ol_txrx_peer_get_peer_mac_addr(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002720{
Leo Chang98726762016-10-28 11:07:18 -07002721 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002722 if (!peer)
2723 return NULL;
2724
2725 return peer->mac_addr.raw;
2726}
2727
Abhishek Singhcfb44482017-03-10 12:42:37 +05302728#ifdef WLAN_FEATURE_11W
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002729/**
2730 * ol_txrx_get_pn_info() - Returns pn info from peer
2731 * @peer: handle to peer
2732 * @last_pn_valid: return last_rmf_pn_valid value from peer.
2733 * @last_pn: return last_rmf_pn value from peer.
2734 * @rmf_pn_replays: return rmf_pn_replays value from peer.
2735 *
2736 * Return: NONE
2737 */
2738void
Leo Chang98726762016-10-28 11:07:18 -07002739ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002740 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2741{
Leo Chang98726762016-10-28 11:07:18 -07002742 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002743 *last_pn_valid = &peer->last_rmf_pn_valid;
2744 *last_pn = &peer->last_rmf_pn;
2745 *rmf_pn_replays = &peer->rmf_pn_replays;
2746}
Abhishek Singhcfb44482017-03-10 12:42:37 +05302747#else
2748void
2749ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
2750 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2751{
2752}
2753#endif
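/*
 * Illustrative usage sketch for the WLAN_FEATURE_11W build (not part of
 * the driver): the caller passes the addresses of local pointers, which
 * are redirected into the peer object; "peer" is assumed to be a valid
 * handle and handle_rmf_pn() is a hypothetical helper.
 *
 *	uint8_t *pn_valid;
 *	uint64_t *last_pn;
 *	uint32_t *replays;
 *
 *	ol_txrx_get_pn_info(peer, &pn_valid, &last_pn, &replays);
 *	if (*pn_valid)
 *		handle_rmf_pn(*last_pn, *replays);
 */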
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002754
2755/**
2756 * ol_txrx_get_opmode() - Return operation mode of vdev
2757 * @vdev: vdev handle
2758 *
2759 * Return: operation mode.
2760 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002761static int ol_txrx_get_opmode(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002762{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002763 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002764 return vdev->opmode;
2765}
2766
2767/**
2768 * ol_txrx_get_peer_state() - Return peer state of peer
2769 * @peer: peer handle
2770 *
2771 * Return: return peer state
 2772 * Return: peer state
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002773static int ol_txrx_get_peer_state(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002774{
Leo Chang98726762016-10-28 11:07:18 -07002775 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002776 return peer->state;
2777}
2778
2779/**
2780 * ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
2781 * @peer: peer handle
2782 *
2783 * Return: vdev handle from peer
2784 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002785static struct cdp_vdev *ol_txrx_get_vdev_for_peer(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002786{
Leo Chang98726762016-10-28 11:07:18 -07002787 ol_txrx_peer_handle peer = ppeer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002788 return (struct cdp_vdev *)peer->vdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002789}
2790
2791/**
2792 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
2793 * @vdev: vdev handle
2794 *
2795 * Return: vdev mac address
2796 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002797static uint8_t *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002798ol_txrx_get_vdev_mac_addr(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002799{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002800 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002801 if (!vdev)
2802 return NULL;
2803
2804 return vdev->mac_addr.raw;
2805}
2806
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002807#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002808/**
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002809 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002810 * vdev
2811 * @vdev: vdev handle
2812 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002813 * Return: Handle to struct qdf_mac_addr
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002814 */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002815struct qdf_mac_addr *
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002816ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
2817{
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002818 return (struct qdf_mac_addr *)&(vdev->mac_addr);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002819}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002820#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002821
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002822#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002823/**
2824 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
2825 * @vdev: vdev handle
2826 *
2827 * Return: Handle to pdev
2828 */
2829ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
2830{
2831 return vdev->pdev;
2832}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002833#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002834
2835/**
2836 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
2837 * @vdev: vdev handle
2838 *
2839 * Return: Handle to control pdev
2840 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002841static struct cdp_cfg *
2842ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002843{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002844 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
2845 return vdev->pdev->ctrl_pdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002846}
2847
2848/**
2849 * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
2850 * @vdev: vdev handle
2851 *
2852 * Return: Rx Fwd disabled status
2853 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002854static uint8_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002855ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002856{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002857 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002858 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
2859 vdev->pdev->ctrl_pdev;
2860 return cfg->rx_fwd_disabled;
2861}
2862
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002863#ifdef QCA_IBSS_SUPPORT
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002864/**
2865 * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
2866 * @vdev: vdev handle
2867 * @peer_num_delta: peer nums to be adjusted
2868 *
2869 * Return: -1 for failure or total peer nums after adjustment.
2870 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002871static int16_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002872ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002873 int16_t peer_num_delta)
2874{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002875 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002876 int16_t new_peer_num;
2877
2878 new_peer_num = vdev->ibss_peer_num + peer_num_delta;
Naveen Rawatc45d1622016-07-05 12:20:09 -07002879 if (new_peer_num > MAX_PEERS || new_peer_num < 0)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002880 return OL_TXRX_INVALID_NUM_PEERS;
2881
2882 vdev->ibss_peer_num = new_peer_num;
2883
2884 return new_peer_num;
2885}
2886
2887/**
2888 * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
2889 * beat timer
2890 * @vdev: vdev handle
2891 * @timer_value_sec: new heart beat timer value
2892 *
2893 * Return: Old timer value set in vdev.
2894 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002895static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
2896 uint16_t timer_value_sec)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002897{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002898 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002899 uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
2900
2901 vdev->ibss_peer_heart_beat_timer = timer_value_sec;
2902
2903 return old_timer_value;
2904}
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002905#endif
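/*
 * Illustrative usage sketch (hypothetical caller): the return value
 * must be checked against OL_TXRX_INVALID_NUM_PEERS, which is returned
 * when the adjusted count would exceed MAX_PEERS or go negative;
 * "ibss_vdev" is a hypothetical cdp_vdev pointer.
 *
 *	int16_t num;
 *
 *	num = ol_txrx_update_ibss_add_peer_num_of_vdev(ibss_vdev, 1);
 *	if (num == OL_TXRX_INVALID_NUM_PEERS)
 *		return QDF_STATUS_E_FAILURE;
 */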
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002906
2907/**
2908 * ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
2909 * @vdev: vdev handle
2910 * @callback: callback function to remove the peer.
2911 * @callback_context: handle for callback function
 2912 * @remove_last_peer: whether the last peer (IBSS bss peer) should also be removed
2913 *
2914 * Return: NONE
2915 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002916static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002917ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002918 ol_txrx_vdev_peer_remove_cb callback,
2919 void *callback_context, bool remove_last_peer)
2920{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002921 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002922 ol_txrx_peer_handle peer, temp;
2923 /* remove all remote peers for vdev */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002924 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002925
2926 temp = NULL;
2927 TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
2928 peer_list_elem) {
2929 if (temp) {
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002930 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
2931 if (qdf_atomic_read(&temp->delete_in_progress) == 0) {
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002932 callback(callback_context, temp->mac_addr.raw,
2933 vdev->vdev_id, temp, false);
2934 }
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002935 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002936 }
2937 /* self peer is deleted last */
2938 if (peer == TAILQ_FIRST(&vdev->peer_list)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302939 ol_txrx_info_high(
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002940 "%s: self peer removed by caller ",
2941 __func__);
2942 break;
Yun Parkeaea8632017-04-09 09:53:45 -07002943 }
2944 temp = peer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002945 }
2946
Mohit Khanna137b97d2016-04-21 16:11:33 -07002947 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
2948
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002949 if (remove_last_peer) {
2950 /* remove IBSS bss peer last */
2951 peer = TAILQ_FIRST(&vdev->peer_list);
2952 callback(callback_context, (uint8_t *) &vdev->mac_addr,
2953 vdev->vdev_id, peer, false);
2954 }
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002955}
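/*
 * Illustrative usage sketch: the caller supplies an
 * ol_txrx_vdev_peer_remove_cb.  The parameter list below only mirrors
 * how the callback is invoked in this file (it may differ from the
 * exact typedef), and the callback name, body and other locals are
 * hypothetical.
 *
 *	static void peer_remove_cb(void *ctx, uint8_t *mac,
 *				   uint8_t vdev_id, void *peer, bool f)
 *	{
 *		(control-path teardown for this peer)
 *	}
 *
 *	ol_txrx_remove_peers_for_vdev(vdev_handle, peer_remove_cb, ctx, true);
 */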
2956
2957/**
2958 * ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
2959 * @vdev: vdev handle
2960 * @callback: callback function to remove the peer.
2961 * @callback_context: handle for callback function
2962 *
2963 * Return: NONE
2964 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002965static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002966ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002967 ol_txrx_vdev_peer_remove_cb callback,
2968 void *callback_context)
2969{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002970 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002971 ol_txrx_peer_handle peer = NULL;
2972
2973 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302974 ol_txrx_info_high(
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002975 "%s: peer found for vdev id %d. deleting the peer",
2976 __func__, vdev->vdev_id);
2977 callback(callback_context, (uint8_t *)&vdev->mac_addr,
2978 vdev->vdev_id, peer, false);
2979 }
2980}
2981
2982/**
2983 * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
2984 * @vdev: vdev handle
2985 * @ocb_set_chan: OCB channel information to be set in vdev.
2986 *
2987 * Return: NONE
2988 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002989static void ol_txrx_set_ocb_chan_info(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002990 struct ol_txrx_ocb_set_chan ocb_set_chan)
2991{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002992 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002993 vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
2994 vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
2995}
2996
2997/**
2998 * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
2999 * @vdev: vdev handle
3000 *
3001 * Return: handle to struct ol_txrx_ocb_chan_info
3002 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003003static struct ol_txrx_ocb_chan_info *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003004ol_txrx_get_ocb_chan_info(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003005{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003006 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07003007 return vdev->ocb_channel_info;
3008}
3009
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003010/**
3011 * @brief specify the peer's authentication state
3012 * @details
3013 * Specify the peer's authentication state (none, connected, authenticated)
3014 * to allow the data SW to determine whether to filter out invalid data frames.
3015 * (In the "connected" state, where security is enabled, but authentication
3016 * has not completed, tx and rx data frames other than EAPOL or WAPI should
3017 * be discarded.)
3018 * This function is only relevant for systems in which the tx and rx filtering
3019 * are done in the host rather than in the target.
3020 *
3021 * @param data_peer - which peer has changed its state
3022 * @param state - the new state of the peer
3023 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07003024 * Return: QDF Status
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003025 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003026QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07003027 uint8_t *peer_mac,
3028 enum ol_txrx_peer_state state)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003029{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003030 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003031 struct ol_txrx_peer_t *peer;
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003032 int peer_ref_cnt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003033
Anurag Chouhanc5548422016-02-24 18:33:27 +05303034 if (qdf_unlikely(!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303035 ol_txrx_err("Pdev is NULL");
Anurag Chouhanc5548422016-02-24 18:33:27 +05303036 qdf_assert(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303037 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003038 }
3039
Mohit Khannababadb82017-02-21 18:54:19 -08003040 peer = ol_txrx_peer_find_hash_find_inc_ref(pdev, peer_mac, 0, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003041 if (NULL == peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303042 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303043 "%s: peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
3044 __func__,
3045 peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
3046 peer_mac[4], peer_mac[5]);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303047 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003048 }
3049
3050 /* TODO: Should we send WMI command of the connection state? */
3051 /* avoid multiple auth state change. */
3052 if (peer->state == state) {
3053#ifdef TXRX_PRINT_VERBOSE_ENABLE
Poddar, Siddarth14521792017-03-14 21:19:42 +05303054 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003055 "%s: no state change, returns directly\n",
3056 __func__);
3057#endif
Mohit Khannab04dfcd2017-02-13 18:54:35 -08003058 peer_ref_cnt = OL_TXRX_PEER_UNREF_DELETE(peer);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303059 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003060 }
3061
Poddar, Siddarth14521792017-03-14 21:19:42 +05303062 ol_txrx_dbg("%s: change from %d to %d\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003063 __func__, peer->state, state);
3064
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003065 peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003066 ? ol_tx_filter_pass_thru
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003067 : ((state == OL_TXRX_PEER_STATE_CONN)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003068 ? ol_tx_filter_non_auth
3069 : ol_tx_filter_discard);
3070
3071 if (peer->vdev->pdev->cfg.host_addba) {
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003072 if (state == OL_TXRX_PEER_STATE_AUTH) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003073 int tid;
3074 /*
3075 * Pause all regular (non-extended) TID tx queues until
3076 * data arrives and ADDBA negotiation has completed.
3077 */
Poddar, Siddarth14521792017-03-14 21:19:42 +05303078 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003079 "%s: pause peer and unpause mgmt/non-qos\n",
3080 __func__);
3081 ol_txrx_peer_pause(peer); /* pause all tx queues */
3082 /* unpause mgmt and non-QoS tx queues */
3083 for (tid = OL_TX_NUM_QOS_TIDS;
3084 tid < OL_TX_NUM_TIDS; tid++)
3085 ol_txrx_peer_tid_unpause(peer, tid);
3086 }
3087 }
Mohit Khannab04dfcd2017-02-13 18:54:35 -08003088 peer_ref_cnt = OL_TXRX_PEER_UNREF_DELETE(peer);
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003089 /*
Mohit Khannab04dfcd2017-02-13 18:54:35 -08003090 * after OL_TXRX_PEER_UNREF_DELETE, peer object cannot be accessed
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003091 * if the return code was 0
3092 */
Mohit Khannab04dfcd2017-02-13 18:54:35 -08003093 if (peer_ref_cnt > 0)
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003094 /*
 3095		 * Set the state after the Pause to avoid the race condition
3096 * with ADDBA check in tx path
3097 */
3098 peer->state = state;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303099 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003100}
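/*
 * Illustrative usage sketch: once association (and, for secured modes,
 * authentication) completes, the control path promotes the peer, e.g.
 * ("pdev_handle" and "sta_mac" are hypothetical locals):
 *
 *	if (ol_txrx_peer_state_update(pdev_handle, sta_mac,
 *				      OL_TXRX_PEER_STATE_AUTH) !=
 *	    QDF_STATUS_SUCCESS)
 *		ol_txrx_err("failed to move peer to authenticated state");
 */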
3101
3102void
3103ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
3104{
3105 peer->keyinstalled = val;
3106}
3107
3108void
3109ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
3110 uint8_t *peer_mac,
3111 union ol_txrx_peer_update_param_t *param,
3112 enum ol_txrx_peer_update_select_t select)
3113{
3114 struct ol_txrx_peer_t *peer;
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003115 int peer_ref_cnt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003116
Mohit Khannababadb82017-02-21 18:54:19 -08003117 peer = ol_txrx_peer_find_hash_find_inc_ref(vdev->pdev, peer_mac, 0, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003118 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303119 ol_txrx_dbg("%s: peer is null",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003120 __func__);
3121 return;
3122 }
3123
3124 switch (select) {
3125 case ol_txrx_peer_update_qos_capable:
3126 {
 3127		/* save qos_capable in the txrx peer here,
 3128		 * and save it again when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO comes.
3129 */
3130 peer->qos_capable = param->qos_capable;
3131 /*
3132 * The following function call assumes that the peer has a
3133 * single ID. This is currently true, and
3134 * is expected to remain true.
3135 */
3136 htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
3137 peer->peer_ids[0],
3138 peer->qos_capable);
3139 break;
3140 }
3141 case ol_txrx_peer_update_uapsdMask:
3142 {
3143 peer->uapsd_mask = param->uapsd_mask;
3144 htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
3145 peer->peer_ids[0],
3146 peer->uapsd_mask);
3147 break;
3148 }
3149 case ol_txrx_peer_update_peer_security:
3150 {
3151 enum ol_sec_type sec_type = param->sec_type;
3152 enum htt_sec_type peer_sec_type = htt_sec_type_none;
3153
3154 switch (sec_type) {
3155 case ol_sec_type_none:
3156 peer_sec_type = htt_sec_type_none;
3157 break;
3158 case ol_sec_type_wep128:
3159 peer_sec_type = htt_sec_type_wep128;
3160 break;
3161 case ol_sec_type_wep104:
3162 peer_sec_type = htt_sec_type_wep104;
3163 break;
3164 case ol_sec_type_wep40:
3165 peer_sec_type = htt_sec_type_wep40;
3166 break;
3167 case ol_sec_type_tkip:
3168 peer_sec_type = htt_sec_type_tkip;
3169 break;
3170 case ol_sec_type_tkip_nomic:
3171 peer_sec_type = htt_sec_type_tkip_nomic;
3172 break;
3173 case ol_sec_type_aes_ccmp:
3174 peer_sec_type = htt_sec_type_aes_ccmp;
3175 break;
3176 case ol_sec_type_wapi:
3177 peer_sec_type = htt_sec_type_wapi;
3178 break;
3179 default:
3180 peer_sec_type = htt_sec_type_none;
3181 break;
3182 }
3183
3184 peer->security[txrx_sec_ucast].sec_type =
3185 peer->security[txrx_sec_mcast].sec_type =
3186 peer_sec_type;
3187
3188 break;
3189 }
3190 default:
3191 {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303192 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003193 "ERROR: unknown param %d in %s", select,
3194 __func__);
3195 break;
3196 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003197 } /* switch */
Mohit Khannab04dfcd2017-02-13 18:54:35 -08003198 peer_ref_cnt = OL_TXRX_PEER_UNREF_DELETE(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003199}
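/*
 * Illustrative usage sketch for the uapsdMask branch of the switch
 * above; the union member and selector names come from this file, the
 * remaining locals are hypothetical:
 *
 *	union ol_txrx_peer_update_param_t param;
 *
 *	param.uapsd_mask = new_mask;
 *	ol_txrx_peer_update(vdev_handle, sta_mac, &param,
 *			    ol_txrx_peer_update_uapsdMask);
 */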
3200
3201uint8_t
3202ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3203{
3204
3205 struct ol_txrx_peer_t *peer;
Yun Parkeaea8632017-04-09 09:53:45 -07003206
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003207 peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3208 if (peer)
3209 return peer->uapsd_mask;
3210 return 0;
3211}
3212
3213uint8_t
3214ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3215{
3216
3217 struct ol_txrx_peer_t *peer_t =
3218 ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3219 if (peer_t != NULL)
3220 return peer_t->qos_capable;
3221 return 0;
3222}
3223
Mohit Khannab04dfcd2017-02-13 18:54:35 -08003224int ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer,
3225 const char *fname,
3226 int line)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003227{
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003228 int rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003229 struct ol_txrx_vdev_t *vdev;
3230 struct ol_txrx_pdev_t *pdev;
3231 int i;
3232
3233 /* preconditions */
3234 TXRX_ASSERT2(peer);
3235
3236 vdev = peer->vdev;
3237 if (NULL == vdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303238 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003239 "The vdev is not present anymore\n");
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003240 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003241 }
3242
3243 pdev = vdev->pdev;
3244 if (NULL == pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303245 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003246 "The pdev is not present anymore\n");
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003247 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003248 }
3249
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003250
3251 /*
3252 * Hold the lock all the way from checking if the peer ref count
3253 * is zero until the peer references are removed from the hash
3254 * table and vdev list (if the peer ref count is zero).
3255 * This protects against a new HL tx operation starting to use the
3256 * peer object just after this function concludes it's done being used.
3257 * Furthermore, the lock needs to be held while checking whether the
3258 * vdev's list of peers is empty, to make sure that list is not modified
3259 * concurrently with the empty check.
3260 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303261 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003262
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003263 /*
3264 * Check for the reference count before deleting the peer
3265 * as we noticed that sometimes we are re-entering this
3266 * function again which is leading to dead-lock.
3267 * (A double-free should never happen, so assert if it does.)
3268 */
3269 rc = qdf_atomic_read(&(peer->ref_cnt));
3270
3271 if (rc == 0) {
3272 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3273 ol_txrx_err("The Peer is not present anymore\n");
3274 qdf_assert(0);
3275 return -EACCES;
3276 }
3277 /*
3278 * now decrement rc; this will be the return code.
3279 * 0 : peer deleted
3280 * >0: peer ref removed, but still has other references
3281 * <0: sanity failed - no changes to the state of the peer
3282 */
3283 rc--;
3284
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07003285 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003286 u_int16_t peer_id;
3287
Mohit Khannababadb82017-02-21 18:54:19 -08003288 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003289 "[%s][%d]: Deleting peer %pK (%pM) ref_cnt %d\n",
Mohit Khannababadb82017-02-21 18:54:19 -08003290 fname, line, peer, peer->mac_addr.raw,
3291 qdf_atomic_read(&peer->ref_cnt));
3292
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003293 peer_id = peer->local_id;
3294 /* remove the reference to the peer from the hash table */
3295 ol_txrx_peer_find_hash_remove(pdev, peer);
3296
3297 /* remove the peer from its parent vdev's list */
3298 TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
3299
3300 /* cleanup the Rx reorder queues for this peer */
3301 ol_rx_peer_cleanup(vdev, peer);
3302
3303 /* peer is removed from peer_list */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303304 qdf_atomic_set(&peer->delete_in_progress, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003305
3306 /*
3307 * Set wait_delete_comp event if the current peer id matches
3308 * with registered peer id.
3309 */
3310 if (peer_id == vdev->wait_on_peer_id) {
Anurag Chouhance0dc992016-02-16 18:18:03 +05303311 qdf_event_set(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003312 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3313 }
3314
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07003315 qdf_timer_sync_cancel(&peer->peer_unmap_timer);
3316 qdf_timer_free(&peer->peer_unmap_timer);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003317
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003318 /* check whether the parent vdev has no peers left */
3319 if (TAILQ_EMPTY(&vdev->peer_list)) {
3320 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003321 * Check if the parent vdev was waiting for its peers
3322 * to be deleted, in order for it to be deleted too.
3323 */
3324 if (vdev->delete.pending) {
3325 ol_txrx_vdev_delete_cb vdev_delete_cb =
3326 vdev->delete.callback;
3327 void *vdev_delete_context =
3328 vdev->delete.context;
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303329 /*
3330 * Now that there are no references to the peer,
3331 * we can release the peer reference lock.
3332 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303333 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303334
gbian016a42e2017-03-01 18:49:11 +08003335 /*
Yun Parkeaea8632017-04-09 09:53:45 -07003336 * The ol_tx_desc_free might access the invalid
3337 * content of vdev referred by tx desc, since
3338 * this vdev might be detached in another thread
 3339				 * asynchronously.
3340 *
3341 * Go through tx desc pool to set corresponding
3342 * tx desc's vdev to NULL when detach this vdev,
3343 * and add vdev checking in the ol_tx_desc_free
3344 * to avoid crash.
3345 */
gbian016a42e2017-03-01 18:49:11 +08003346 ol_txrx_tx_desc_reset_vdev(vdev);
Poddar, Siddarth14521792017-03-14 21:19:42 +05303347 ol_txrx_dbg(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003348 "%s: deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) - its last peer is done",
Yun Parkeaea8632017-04-09 09:53:45 -07003349 __func__, vdev,
3350 vdev->mac_addr.raw[0],
3351 vdev->mac_addr.raw[1],
3352 vdev->mac_addr.raw[2],
3353 vdev->mac_addr.raw[3],
3354 vdev->mac_addr.raw[4],
3355 vdev->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003356 /* all peers are gone, go ahead and delete it */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303357 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003358 if (vdev_delete_cb)
3359 vdev_delete_cb(vdev_delete_context);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303360 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303361 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003362 }
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303363 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303364 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303365 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003366
Varun Reddy Yeturudd51e8d2017-05-14 14:51:13 -07003367 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003368 "%s: Deleting peer %pK (%pM) ref_cnt = %d",
Varun Reddy Yeturudd51e8d2017-05-14 14:51:13 -07003369 __func__, peer, peer->mac_addr.raw,
3370 qdf_atomic_read(&peer->ref_cnt));
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303371 ol_txrx_peer_tx_queue_free(pdev, peer);
3372
Deepak Dhamdhereb0d2dda2017-04-03 01:01:50 -07003373 /* Remove mappings from peer_id to peer object */
3374 ol_txrx_peer_clear_map_peer(pdev, peer);
3375
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003376 /*
3377 * 'array' is allocated in addba handler and is supposed to be
3378 * freed in delba handler. There is the case (for example, in
3379 * SSR) where delba handler is not called. Because array points
3380 * to address of 'base' by default and is reallocated in addba
3381 * handler later, only free the memory when the array does not
3382 * point to base.
3383 */
3384 for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
3385 if (peer->tids_rx_reorder[i].array !=
3386 &peer->tids_rx_reorder[i].base) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303387 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003388 "%s, delete reorder arr, tid:%d\n",
3389 __func__, i);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303390 qdf_mem_free(peer->tids_rx_reorder[i].array);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003391 ol_rx_reorder_init(&peer->tids_rx_reorder[i],
3392 (uint8_t) i);
3393 }
3394 }
3395
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303396 qdf_mem_free(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003397 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303398 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Varun Reddy Yeturudd51e8d2017-05-14 14:51:13 -07003399 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003400 "[%s][%d]: ref delete peer %pK peer->ref_cnt = %d",
Mohit Khannababadb82017-02-21 18:54:19 -08003401 fname, line, peer, rc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003402 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003403
3404 return rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003405}
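/*
 * Note on the return value above: 0 means the peer object was freed, a
 * positive value is the remaining reference count, and a negative value
 * signals a sanity failure with no state change.  Callers in this file
 * go through the OL_TXRX_PEER_UNREF_DELETE() wrapper, which presumably
 * supplies the caller's function name and line; a hedged sketch:
 *
 *	if (OL_TXRX_PEER_UNREF_DELETE(peer) == 0)
 *		peer = NULL;	(the object must not be touched again)
 */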
3406
Dhanashri Atre12a08392016-02-17 13:10:34 -08003407/**
Mohit Khanna0696eef2016-04-14 16:14:08 -07003408 * ol_txrx_clear_peer_internal() - ol internal function to clear peer
3409 * @peer: pointer to ol txrx peer structure
3410 *
3411 * Return: QDF Status
3412 */
3413static QDF_STATUS
3414ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
3415{
3416 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3417 /* Drop pending Rx frames in CDS */
3418 if (sched_ctx)
3419 cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3420
3421 /* Purge the cached rx frame queue */
3422 ol_txrx_flush_rx_frames(peer, 1);
3423
3424 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003425 peer->state = OL_TXRX_PEER_STATE_DISC;
3426 qdf_spin_unlock_bh(&peer->peer_info_lock);
3427
3428 return QDF_STATUS_SUCCESS;
3429}
3430
3431/**
3432 * ol_txrx_clear_peer() - clear peer
3433 * @sta_id: sta id
3434 *
3435 * Return: QDF Status
3436 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003437static QDF_STATUS ol_txrx_clear_peer(struct cdp_pdev *ppdev, uint8_t sta_id)
Mohit Khanna0696eef2016-04-14 16:14:08 -07003438{
3439 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003440 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003441
3442 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303443 ol_txrx_err("%s: Unable to find pdev!",
Mohit Khanna0696eef2016-04-14 16:14:08 -07003444 __func__);
3445 return QDF_STATUS_E_FAILURE;
3446 }
3447
3448 if (sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303449 ol_txrx_err("Invalid sta id %d", sta_id);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003450 return QDF_STATUS_E_INVAL;
3451 }
3452
3453
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003454 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003455 if (!peer)
3456 return QDF_STATUS_E_FAULT;
3457
3458 return ol_txrx_clear_peer_internal(peer);
3459
3460}
3461
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003462void peer_unmap_timer_work_function(void *param)
3463{
3464 WMA_LOGE("Enter: %s", __func__);
Anurag Chouhan4085ff72017-10-05 18:09:56 +05303465 cds_trigger_recovery(QDF_PEER_UNMAP_TIMEDOUT);
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003466}
3467
Mohit Khanna0696eef2016-04-14 16:14:08 -07003468/**
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003469 * peer_unmap_timer_handler() - peer unmap timer function
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003470 * @data: peer object pointer
3471 *
3472 * Return: none
3473 */
3474void peer_unmap_timer_handler(void *data)
3475{
3476 ol_txrx_peer_handle peer = (ol_txrx_peer_handle)data;
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003477 ol_txrx_pdev_handle txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003478
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003479 ol_txrx_err("all unmap events not received for peer %pK, ref_cnt %d",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003480 peer, qdf_atomic_read(&peer->ref_cnt));
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003481 ol_txrx_err("peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003482 peer,
3483 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3484 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3485 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Nachiket Kukadea48fd772017-07-28 18:48:57 +05303486 if (!cds_is_driver_recovering() && !cds_is_fw_down()) {
Deepak Dhamdhere64bfe972017-06-14 18:04:53 -07003487 qdf_create_work(0, &txrx_pdev->peer_unmap_timer_work,
3488 peer_unmap_timer_work_function,
3489 NULL);
3490 qdf_sched_work(0, &txrx_pdev->peer_unmap_timer_work);
Deepak Dhamdhered42ab7c2017-04-13 19:32:16 -07003491 } else {
3492 ol_txrx_err("Recovery is in progress, ignore!");
3493 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003494}
3495
3496
3497/**
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003498 * ol_txrx_peer_detach() - Delete a peer's data object.
3499 * @peer - the object to detach
Dhanashri Atre12a08392016-02-17 13:10:34 -08003500 *
3501 * When the host's control SW disassociates a peer, it calls
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003502 * this function to detach and delete the peer. The reference
Dhanashri Atre12a08392016-02-17 13:10:34 -08003503 * stored in the control peer object to the data peer
3504 * object (set up by a call to ol_peer_store()) is provided.
3505 *
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003506 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08003507 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003508static void ol_txrx_peer_detach(void *ppeer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003509{
Leo Chang98726762016-10-28 11:07:18 -07003510 ol_txrx_peer_handle peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003511 struct ol_txrx_vdev_t *vdev = peer->vdev;
3512
3513 /* redirect peer's rx delivery function to point to a discard func */
3514 peer->rx_opt_proc = ol_rx_discard;
3515
3516 peer->valid = 0;
3517
Mohit Khanna0696eef2016-04-14 16:14:08 -07003518 /* flush all rx packets before clearing up the peer local_id */
3519 ol_txrx_clear_peer_internal(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003520 ol_txrx_local_peer_id_free(peer->vdev->pdev, peer);
3521
3522 /* debug print to dump rx reorder state */
3523 /* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
3524
Poddar, Siddarth14521792017-03-14 21:19:42 +05303525 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003526 "%s:peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003527 __func__, peer,
3528 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3529 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3530 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003531
3532 if (peer->vdev->last_real_peer == peer)
3533 peer->vdev->last_real_peer = NULL;
3534
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303535 qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003536 if (vdev->last_real_peer == peer)
3537 vdev->last_real_peer = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303538 qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003539 htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
3540
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303541 qdf_spinlock_destroy(&peer->peer_info_lock);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07003542 qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003543 /*
3544 * set delete_in_progress to identify that wma
 3545	 * is waiting for the unmap message for this peer
3546 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303547 qdf_atomic_set(&peer->delete_in_progress, 1);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003548
Abhishek Singh217d9782017-04-28 23:49:11 +05303549 if (vdev->opmode == wlan_op_mode_sta) {
3550 qdf_mem_copy(&peer->vdev->last_peer_mac_addr,
3551 &peer->mac_addr,
3552 sizeof(union ol_txrx_align_mac_addr_t));
3553 }
3554
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003555 /*
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003556 * Create a timer to track unmap events when the sta peer gets deleted.
3557 */
3558 if (vdev->opmode == wlan_op_mode_sta) {
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07003559 qdf_timer_start(&peer->peer_unmap_timer,
3560 OL_TXRX_PEER_UNMAP_TIMEOUT);
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003561 ol_txrx_info_high("%s: started peer_unmap_timer for peer %pK",
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07003562 __func__, peer);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003563 }
3564
3565 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003566 * Remove the reference added during peer_attach.
3567 * The peer will still be left allocated until the
3568 * PEER_UNMAP message arrives to remove the other
3569 * reference, added by the PEER_MAP message.
3570 */
Mohit Khannab04dfcd2017-02-13 18:54:35 -08003571 OL_TXRX_PEER_UNREF_DELETE(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003572}
3573
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003574/**
3575 * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
3576 * @peer - the object to detach
3577 *
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003578 * Detach a peer and force peer object to be removed. It is called during
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003579 * roaming scenario when the firmware has already deleted a peer.
Deepak Dhamdhered40f4b12017-03-24 11:07:45 -07003580 * Remove it from the peer_id_to_object map. Peer object is actually freed
3581 * when last reference is deleted.
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003582 *
3583 * Return: None
3584 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003585static void ol_txrx_peer_detach_force_delete(void *ppeer)
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003586{
Leo Chang98726762016-10-28 11:07:18 -07003587 ol_txrx_peer_handle peer = ppeer;
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003588 ol_txrx_pdev_handle pdev = peer->vdev->pdev;
3589
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003590 ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d",
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003591 __func__, peer, qdf_atomic_read(&peer->ref_cnt));
3592
3593 /* Clear the peer_id_to_obj map entries */
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003594 ol_txrx_peer_remove_obj_map_entries(pdev, peer);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003595 ol_txrx_peer_detach(peer);
3596}
3597
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003598ol_txrx_peer_handle
3599ol_txrx_peer_find_by_addr(struct ol_txrx_pdev_t *pdev, uint8_t *peer_mac_addr)
3600{
3601 struct ol_txrx_peer_t *peer;
Mohit Khannababadb82017-02-21 18:54:19 -08003602 peer = ol_txrx_peer_find_hash_find_inc_ref(pdev, peer_mac_addr, 0, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003603 if (peer) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05303604 ol_txrx_info_high(
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07003605 "%s: Delete extra reference %pK", __func__, peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003606 /* release the extra reference */
Mohit Khannab04dfcd2017-02-13 18:54:35 -08003607 OL_TXRX_PEER_UNREF_DELETE(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003608 }
3609 return peer;
3610}
3611
3612/**
3613 * ol_txrx_dump_tx_desc() - dump tx desc total and free count
3614 * @txrx_pdev: Pointer to txrx pdev
3615 *
3616 * Return: none
3617 */
3618static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3619{
3620 struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003621 uint32_t total, num_free;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003622
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303623 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3624 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3625 else
3626 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003627
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003628 num_free = ol_tx_get_total_free_desc(pdev);
3629
Kapil Gupta53d9b572017-06-28 17:53:25 +05303630 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303631 "total tx credit %d num_free %d",
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003632 total, num_free);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003633
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003634}
3635
3636/**
3637 * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3638 * @timeout: timeout in ms
3639 *
3640 * Wait for tx queue to be empty, return timeout error if
3641 * queue doesn't empty before timeout occurs.
3642 *
3643 * Return:
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303644 * QDF_STATUS_SUCCESS if the queue empties,
3645 * QDF_STATUS_E_TIMEOUT in case of timeout,
3646 * QDF_STATUS_E_FAULT in case of missing handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003647 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003648static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003649{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003650 struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003651
3652 if (txrx_pdev == NULL) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303653 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003654 "%s: txrx context is null", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303655 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003656 }
3657
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003658 while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
Anurag Chouhan512c7d52016-02-19 15:49:46 +05303659 qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003660 if (timeout <= 0) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303661 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303662 "%s: tx frames are pending", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003663 ol_txrx_dump_tx_desc(txrx_pdev);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303664 return QDF_STATUS_E_TIMEOUT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003665 }
3666 timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3667 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303668 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003669}
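/*
 * Illustrative usage sketch, e.g. in a suspend path (this mirrors what
 * ol_txrx_bus_suspend() below does with SUSPEND_DRAIN_WAIT):
 *
 *	if (ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_TIMEOUT;
 */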
3670
3671#ifndef QCA_WIFI_3_0_EMU
Himanshu Agarwal83a87572017-05-25 14:09:50 +05303672#define SUSPEND_DRAIN_WAIT 500
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003673#else
3674#define SUSPEND_DRAIN_WAIT 3000
3675#endif
3676
Yue Ma1e11d792016-02-26 18:58:44 -08003677#ifdef FEATURE_RUNTIME_PM
3678/**
3679 * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
3680 * @txrx_pdev: TXRX pdev context
3681 *
3682 * TXRX is ready to runtime suspend if there are no pending packets
3683 * in the tx queue.
3684 *
3685 * Return: QDF_STATUS
3686 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003687static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003688{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003689 struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07003690
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003691 if (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev))
Yue Ma1e11d792016-02-26 18:58:44 -08003692 return QDF_STATUS_E_BUSY;
3693 else
3694 return QDF_STATUS_SUCCESS;
3695}
3696
3697/**
3698 * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
3699 * @txrx_pdev: TXRX pdev context
3700 *
3701 * This is a dummy function for symmetry.
3702 *
3703 * Return: QDF_STATUS_SUCCESS
3704 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003705static QDF_STATUS ol_txrx_runtime_resume(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003706{
3707 return QDF_STATUS_SUCCESS;
3708}
3709#endif
3710
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003711/**
3712 * ol_txrx_bus_suspend() - bus suspend
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003713 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003714 *
3715 * Ensure that ol_txrx is ready for bus suspend
3716 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303717 * Return: QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003718 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003719static QDF_STATUS ol_txrx_bus_suspend(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003720{
3721 return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3722}
3723
3724/**
3725 * ol_txrx_bus_resume() - bus resume
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003726 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003727 *
3728 * Dummy function for symetry
3729 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303730 * Return: QDF_STATUS_SUCCESS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003731 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003732static QDF_STATUS ol_txrx_bus_resume(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003733{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303734 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003735}
3736
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003737/**
3738 * ol_txrx_get_tx_pending - Get the number of pending transmit
3739 * frames that are awaiting completion.
3740 *
3741 * @ppdev - the data physical device object
3742 * Mainly used in the cleanup path to make sure all buffers have been freed
3743 *
3744 * Return: count of pending frames
3745 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003746int ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003747{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003748 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003749 uint32_t total;
3750
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303751 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3752 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3753 else
3754 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003755
Nirav Shah55b45a02016-01-21 10:00:16 +05303756 return total - ol_tx_get_total_free_desc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003757}
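
/*
 * Illustrative sketch (not part of the driver): a caller can poll
 * ol_txrx_get_tx_pending() to drain outstanding tx frames before a
 * suspend, in the same spirit as ol_txrx_wait_for_pending_tx() above.
 * The example_drain_tx() helper name is hypothetical; the delay macro
 * is the one already used in this file.
 *
 *	static QDF_STATUS example_drain_tx(struct cdp_pdev *ppdev,
 *					   int timeout_ms)
 *	{
 *		while (ol_txrx_get_tx_pending(ppdev)) {
 *			if (timeout_ms <= 0)
 *				return QDF_STATUS_E_TIMEOUT;
 *			qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
 *			timeout_ms -= OL_ATH_TX_DRAIN_WAIT_DELAY;
 *		}
 *		return QDF_STATUS_SUCCESS;
 *	}
 */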
3758
3759void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
3760{
3761 ol_tx_desc_list tx_descs;
Yun Parkeaea8632017-04-09 09:53:45 -07003762 /*
3763 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303764 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Yun Parkeaea8632017-04-09 09:53:45 -07003765	 * which is the same as the normal data send completion path
3766 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003767 htt_tx_pending_discard(pdev_handle->htt_pdev);
3768
3769 TAILQ_INIT(&tx_descs);
3770 ol_tx_queue_discard(pdev_handle, true, &tx_descs);
3771 /* Discard Frames in Discard List */
3772 ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
3773
3774 ol_tx_discard_target_frms(pdev_handle);
3775}
3776
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003777static inline
3778uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
3779{
3780 return (uint64_t) ((size_t) req);
3781}
3782
3783static inline
3784struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
3785{
3786 return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
3787}
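
/*
 * Note on the helpers above: the address of the non-transient stats
 * request is packed into the 64-bit HTT cookie on the way down and
 * recovered unchanged in ol_txrx_fw_stats_handler() on the way back:
 *
 *	uint64_t cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
 *	...
 *	req = ol_txrx_u64_to_stats_ptr(cookie);	// same pointer back
 *
 * Both conversions go through size_t, so the round trip is exact on
 * 32-bit and 64-bit hosts alike.
 */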
3788
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003789#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003790void
3791ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
3792 uint8_t cfg_stats_type, uint32_t cfg_val)
3793{
3794 uint64_t dummy_cookie = 0;
Yun Parkeaea8632017-04-09 09:53:45 -07003795
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003796 htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
3797 0 /* reset mask */,
3798 cfg_stats_type, cfg_val, dummy_cookie);
3799}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003800#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003801
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003802static A_STATUS
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003803ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
Dhanashri Atre52f71332016-08-22 12:12:36 -07003804 bool per_vdev, bool response_expected)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003805{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003806 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003807 struct ol_txrx_pdev_t *pdev = vdev->pdev;
3808 uint64_t cookie;
3809 struct ol_txrx_stats_req_internal *non_volatile_req;
3810
3811 if (!pdev ||
3812 req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
3813 req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
3814 return A_ERROR;
3815 }
3816
3817 /*
3818 * Allocate a non-transient stats request object.
3819 * (The one provided as an argument is likely allocated on the stack.)
3820 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303821 non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003822 if (!non_volatile_req)
3823 return A_NO_MEMORY;
3824
3825 /* copy the caller's specifications */
3826 non_volatile_req->base = *req;
3827 non_volatile_req->serviced = 0;
3828 non_volatile_req->offset = 0;
3829
3830 /* use the non-volatile request object's address as the cookie */
3831 cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
3832
tfyu9fcabd72017-09-26 17:46:48 +08003833 if (response_expected) {
3834 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3835 TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
3836 pdev->req_list_depth++;
3837 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
3838 }
3839
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003840 if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
3841 req->stats_type_upload_mask,
3842 req->stats_type_reset_mask,
3843 HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
3844 cookie)) {
tfyu9fcabd72017-09-26 17:46:48 +08003845 if (response_expected) {
3846 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3847 TAILQ_REMOVE(&pdev->req_list, non_volatile_req, req_list_elem);
3848 pdev->req_list_depth--;
3849 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
3850 }
3851
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303852 qdf_mem_free(non_volatile_req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003853 return A_ERROR;
3854 }
3855
Nirav Shahd2310422016-01-21 18:58:06 +05303856 if (response_expected == false)
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303857 qdf_mem_free(non_volatile_req);
Nirav Shahd2310422016-01-21 18:58:06 +05303858
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003859 return A_OK;
3860}
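
/*
 * Illustrative sketch (not part of the driver): a minimal user of
 * ol_txrx_fw_stats_get().  Only fields that this file itself consumes
 * (the stats type masks and the print, copy and callback members) are
 * filled in; the wrapper name and the chosen stats type are hypothetical.
 *
 *	static A_STATUS example_request_fw_stats(struct cdp_vdev *vdev)
 *	{
 *		struct ol_txrx_stats_req req = { 0 };
 *
 *		req.stats_type_upload_mask = 1 << HTT_DBG_STATS_WAL_PDEV_TXRX;
 *		req.stats_type_reset_mask = 0;
 *		req.print.verbose = 1;		// printed in the stats handler
 *		req.copy.buf = NULL;		// no copy back to the caller
 *		req.callback.fp = NULL;
 *
 *		// response_expected = true keeps the internal copy of the
 *		// request alive until ol_txrx_fw_stats_handler() runs
 *		return ol_txrx_fw_stats_get(vdev, &req, false, true);
 *	}
 */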
Dhanashri Atre12a08392016-02-17 13:10:34 -08003861
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003862void
3863ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
3864 uint64_t cookie, uint8_t *stats_info_list)
3865{
3866 enum htt_dbg_stats_type type;
3867 enum htt_dbg_stats_status status;
3868 int length;
3869 uint8_t *stats_data;
tfyu9fcabd72017-09-26 17:46:48 +08003870 struct ol_txrx_stats_req_internal *req, *tmp;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003871 int more = 0;
tfyu9fcabd72017-09-26 17:46:48 +08003872 int found = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003873
3874 req = ol_txrx_u64_to_stats_ptr(cookie);
3875
tfyu9fcabd72017-09-26 17:46:48 +08003876 qdf_spin_lock_bh(&pdev->req_list_spinlock);
3877 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
3878 if (req == tmp) {
3879 found = 1;
3880 break;
3881 }
3882 }
3883 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
3884
3885 if (!found) {
3886 ol_txrx_err(
3887 "req(%p) from firmware can't be found in the list\n", req);
3888 return;
3889 }
3890
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003891 do {
3892 htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
3893 &length, &stats_data);
3894 if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
3895 break;
3896 if (status == HTT_DBG_STATS_STATUS_PRESENT ||
3897 status == HTT_DBG_STATS_STATUS_PARTIAL) {
3898 uint8_t *buf;
3899 int bytes = 0;
3900
3901 if (status == HTT_DBG_STATS_STATUS_PARTIAL)
3902 more = 1;
3903 if (req->base.print.verbose || req->base.print.concise)
3904 /* provide the header along with the data */
3905 htt_t2h_stats_print(stats_info_list,
3906 req->base.print.concise);
3907
3908 switch (type) {
3909 case HTT_DBG_STATS_WAL_PDEV_TXRX:
3910 bytes = sizeof(struct wlan_dbg_stats);
3911 if (req->base.copy.buf) {
3912 int lmt;
3913
3914 lmt = sizeof(struct wlan_dbg_stats);
3915 if (req->base.copy.byte_limit < lmt)
3916 lmt = req->base.copy.byte_limit;
3917 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303918 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003919 }
3920 break;
3921 case HTT_DBG_STATS_RX_REORDER:
3922 bytes = sizeof(struct rx_reorder_stats);
3923 if (req->base.copy.buf) {
3924 int lmt;
3925
3926 lmt = sizeof(struct rx_reorder_stats);
3927 if (req->base.copy.byte_limit < lmt)
3928 lmt = req->base.copy.byte_limit;
3929 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303930 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003931 }
3932 break;
3933 case HTT_DBG_STATS_RX_RATE_INFO:
3934 bytes = sizeof(wlan_dbg_rx_rate_info_t);
3935 if (req->base.copy.buf) {
3936 int lmt;
3937
3938 lmt = sizeof(wlan_dbg_rx_rate_info_t);
3939 if (req->base.copy.byte_limit < lmt)
3940 lmt = req->base.copy.byte_limit;
3941 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303942 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003943 }
3944 break;
3945
3946 case HTT_DBG_STATS_TX_RATE_INFO:
3947 bytes = sizeof(wlan_dbg_tx_rate_info_t);
3948 if (req->base.copy.buf) {
3949 int lmt;
3950
3951 lmt = sizeof(wlan_dbg_tx_rate_info_t);
3952 if (req->base.copy.byte_limit < lmt)
3953 lmt = req->base.copy.byte_limit;
3954 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303955 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003956 }
3957 break;
3958
3959 case HTT_DBG_STATS_TX_PPDU_LOG:
3960 bytes = 0;
3961 /* TO DO: specify how many bytes are present */
3962 /* TO DO: add copying to the requestor's buf */
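				/*
				 * No break here: execution falls through into
				 * the HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO
				 * case below; copying the PPDU log data is
				 * still a TO DO (see the notes above).
				 */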
3963
3964 case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07003965 bytes = sizeof(struct
3966 rx_remote_buffer_mgmt_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003967 if (req->base.copy.buf) {
3968 int limit;
3969
Yun Parkeaea8632017-04-09 09:53:45 -07003970 limit = sizeof(struct
3971 rx_remote_buffer_mgmt_stats);
3972 if (req->base.copy.byte_limit < limit)
3973 limit = req->base.copy.
3974 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003975 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303976 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003977 }
3978 break;
3979
3980 case HTT_DBG_STATS_TXBF_INFO:
3981 bytes = sizeof(struct wlan_dbg_txbf_data_stats);
3982 if (req->base.copy.buf) {
3983 int limit;
3984
Yun Parkeaea8632017-04-09 09:53:45 -07003985 limit = sizeof(struct
3986 wlan_dbg_txbf_data_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003987 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003988 limit = req->base.copy.
3989 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003990 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303991 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003992 }
3993 break;
3994
3995 case HTT_DBG_STATS_SND_INFO:
3996 bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
3997 if (req->base.copy.buf) {
3998 int limit;
3999
Yun Parkeaea8632017-04-09 09:53:45 -07004000 limit = sizeof(struct
4001 wlan_dbg_txbf_snd_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004002 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004003 limit = req->base.copy.
4004 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004005 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304006 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004007 }
4008 break;
4009
4010 case HTT_DBG_STATS_TX_SELFGEN_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07004011 bytes = sizeof(struct
4012 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004013 if (req->base.copy.buf) {
4014 int limit;
4015
Yun Parkeaea8632017-04-09 09:53:45 -07004016 limit = sizeof(struct
4017 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004018 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004019 limit = req->base.copy.
4020 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004021 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304022 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004023 }
4024 break;
4025
4026 case HTT_DBG_STATS_ERROR_INFO:
4027 bytes =
4028 sizeof(struct wlan_dbg_wifi2_error_stats);
4029 if (req->base.copy.buf) {
4030 int limit;
4031
Yun Parkeaea8632017-04-09 09:53:45 -07004032 limit = sizeof(struct
4033 wlan_dbg_wifi2_error_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004034 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07004035 limit = req->base.copy.
4036 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004037 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304038 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004039 }
4040 break;
4041
4042 case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
4043 bytes =
4044 sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
4045 if (req->base.copy.buf) {
4046 int limit;
4047
4048 limit = sizeof(struct
4049 rx_txbf_musu_ndpa_pkts_stats);
4050 if (req->base.copy.byte_limit < limit)
4051 limit =
4052 req->base.copy.byte_limit;
4053 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304054 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004055 }
4056 break;
4057
4058 default:
4059 break;
4060 }
Yun Parkeaea8632017-04-09 09:53:45 -07004061 buf = req->base.copy.buf ?
4062 req->base.copy.buf : stats_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004063 if (req->base.callback.fp)
4064 req->base.callback.fp(req->base.callback.ctxt,
4065 type, buf, bytes);
4066 }
4067 stats_info_list += length;
4068 } while (1);
4069
4070 if (!more) {
tfyu9fcabd72017-09-26 17:46:48 +08004071 qdf_spin_lock_bh(&pdev->req_list_spinlock);
4072 TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4073 if (req == tmp) {
4074 TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
4075 pdev->req_list_depth--;
4076 qdf_mem_free(req);
4077 break;
4078 }
4079 }
4080 qdf_spin_unlock_bh(&pdev->req_list_spinlock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004081 }
4082}
4083
4084#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
4085int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
4086{
4087 if (debug_specs & TXRX_DBG_MASK_OBJS) {
4088#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4089 ol_txrx_pdev_display(vdev->pdev, 0);
4090#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304091 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304092 "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004093#endif
4094 }
Yun Parkeaea8632017-04-09 09:53:45 -07004095 if (debug_specs & TXRX_DBG_MASK_STATS)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004096 ol_txrx_stats_display(vdev->pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004097 if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
4098#if defined(ENABLE_TXRX_PROT_ANALYZE)
4099 ol_txrx_prot_ans_display(vdev->pdev);
4100#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304101 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304102 "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004103#endif
4104 }
4105 if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
4106#if defined(ENABLE_RX_REORDER_TRACE)
4107 ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
4108#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304109 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304110 "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004111#endif
4112
4113 }
4114 return 0;
4115}
4116#endif
4117
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004118#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004119int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
4120 int max_subfrms_ampdu, int max_subfrms_amsdu)
4121{
4122 return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
4123 max_subfrms_ampdu, max_subfrms_amsdu);
4124}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08004125#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004126
4127#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4128void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
4129{
4130 struct ol_txrx_vdev_t *vdev;
4131
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304132 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004133 "%*s%s:\n", indent, " ", "txrx pdev");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304134 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004135 "%*spdev object: %pK", indent + 4, " ", pdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304136 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004137 "%*svdev list:", indent + 4, " ");
4138 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304139 ol_txrx_vdev_display(vdev, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004140 }
4141 ol_txrx_peer_find_display(pdev, indent + 4);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304142 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004143 "%*stx desc pool: %d elems @ %pK", indent + 4, " ",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004144 pdev->tx_desc.pool_size, pdev->tx_desc.array);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304145 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004146 htt_display(pdev->htt_pdev, indent);
4147}
4148
4149void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
4150{
4151 struct ol_txrx_peer_t *peer;
4152
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304153 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004154 "%*stxrx vdev: %pK\n", indent, " ", vdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304155 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004156 "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304157 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004158 "%*sMAC addr: %d:%d:%d:%d:%d:%d",
4159 indent + 4, " ",
4160 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
4161 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
4162 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304163 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004164 "%*speer list:", indent + 4, " ");
4165 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304166 ol_txrx_peer_display(peer, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004167 }
4168}
4169
4170void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
4171{
4172 int i;
4173
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304174 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004175 "%*stxrx peer: %pK", indent, " ", peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004176 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
4177 if (peer->peer_ids[i] != HTT_INVALID_PEER) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304178 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004179 "%*sID: %d", indent + 4, " ",
4180 peer->peer_ids[i]);
4181 }
4182 }
4183}
4184#endif /* TXRX_DEBUG_LEVEL */
4185
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004186/**
4187 * ol_txrx_stats() - write the vdev's tx pause-queue stats into a buffer
4188 * @vdev_id: vdev_id
4189 * @buffer: pointer to buffer
4190 * @buf_len: length of the buffer
4191 *
4192 * Return: length of string
4193 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004194static int
Yun Parkeaea8632017-04-09 09:53:45 -07004195ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004196{
4197 uint32_t len = 0;
4198
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004199 struct ol_txrx_vdev_t *vdev =
4200 (struct ol_txrx_vdev_t *)
4201 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004202
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004203 if (!vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304204 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304205 "%s: vdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004206 snprintf(buffer, buf_len, "vdev not found");
4207 return len;
4208 }
4209
4210 len = scnprintf(buffer, buf_len,
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004211 "\n\nTXRX stats:\nllQueue State : %s\npause %u unpause %u\noverflow %u\nllQueue timer state : %s",
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304212 ((vdev->ll_pause.is_q_paused == false) ?
4213 "UNPAUSED" : "PAUSED"),
4214 vdev->ll_pause.q_pause_cnt,
4215 vdev->ll_pause.q_unpause_cnt,
4216 vdev->ll_pause.q_overflow_cnt,
4217 ((vdev->ll_pause.is_q_timer_on == false)
4218 ? "NOT-RUNNING" : "RUNNING"));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004219 return len;
4220}
4221
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004222#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
4223/**
4224 * ol_txrx_disp_peer_cached_bufq_stats() - display peer cached_bufq stats
4225 * @peer: peer pointer
4226 *
4227 * Return: None
4228 */
4229static void ol_txrx_disp_peer_cached_bufq_stats(struct ol_txrx_peer_t *peer)
4230{
4231 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4232 "cached_bufq: curr %d drops %d hwm %d whatifs %d thresh %d",
4233 peer->bufq_info.curr,
4234 peer->bufq_info.dropped,
4235 peer->bufq_info.high_water_mark,
4236 peer->bufq_info.qdepth_no_thresh,
4237 peer->bufq_info.thresh);
4238}
4239
4240/**
4241 * ol_txrx_disp_peer_stats() - display peer stats
4242 * @pdev: pdev pointer
4243 *
4244 * Return: None
4245 */
4246static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4247{
	int i;
4248 struct ol_txrx_peer_t *peer;
4249 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
4250
4251 if (osc && hif_is_load_or_unload_in_progress(HIF_GET_SOFTC(osc)))
4252 return;
4253
4254 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
4255 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4256 peer = pdev->local_peer_ids.map[i];
4257 if (peer)
4258 OL_TXRX_PEER_INC_REF_CNT(peer);
4259 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4260
4261 if (peer) {
4262 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07004263 "stats: peer 0x%pK local peer id %d", peer, i);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004264 ol_txrx_disp_peer_cached_bufq_stats(peer);
4265 OL_TXRX_PEER_UNREF_DELETE(peer);
4266 }
4267 }
4268}
4269#else
4270static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4271{
4272 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4273 "peer stats not supported w/o QCA_SUPPORT_TXRX_LOCAL_PEER_ID");
4274}
4275#endif
4276
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004277void ol_txrx_stats_display(ol_txrx_pdev_handle pdev)
4278{
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304279 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304280 "TX PATH Statistics:");
4281 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05304282 "sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
4283 pdev->stats.pub.tx.from_stack.pkts,
4284 pdev->stats.pub.tx.from_stack.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004285 pdev->stats.pub.tx.dropped.host_reject.pkts,
4286 pdev->stats.pub.tx.dropped.host_reject.bytes,
4287 pdev->stats.pub.tx.dropped.download_fail.pkts
4288 + pdev->stats.pub.tx.dropped.target_discard.pkts
4289 + pdev->stats.pub.tx.dropped.no_ack.pkts,
4290 pdev->stats.pub.tx.dropped.download_fail.bytes
4291 + pdev->stats.pub.tx.dropped.target_discard.bytes
4292 + pdev->stats.pub.tx.dropped.no_ack.bytes);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304293 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Yun Parkeaea8632017-04-09 09:53:45 -07004294 "successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B)",
Nirav Shahda008342016-05-17 18:50:40 +05304295 pdev->stats.pub.tx.delivered.pkts,
4296 pdev->stats.pub.tx.delivered.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004297 pdev->stats.pub.tx.dropped.download_fail.pkts,
4298 pdev->stats.pub.tx.dropped.download_fail.bytes,
4299 pdev->stats.pub.tx.dropped.target_discard.pkts,
4300 pdev->stats.pub.tx.dropped.target_discard.bytes,
4301 pdev->stats.pub.tx.dropped.no_ack.pkts,
4302 pdev->stats.pub.tx.dropped.no_ack.bytes);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304303 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05304304 "Tx completions per HTT message:\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004305 "Single Packet %d\n"
4306 " 2-10 Packets %d\n"
4307 "11-20 Packets %d\n"
4308 "21-30 Packets %d\n"
4309 "31-40 Packets %d\n"
4310 "41-50 Packets %d\n"
4311 "51-60 Packets %d\n"
4312 " 60+ Packets %d\n",
4313 pdev->stats.pub.tx.comp_histogram.pkts_1,
4314 pdev->stats.pub.tx.comp_histogram.pkts_2_10,
4315 pdev->stats.pub.tx.comp_histogram.pkts_11_20,
4316 pdev->stats.pub.tx.comp_histogram.pkts_21_30,
4317 pdev->stats.pub.tx.comp_histogram.pkts_31_40,
4318 pdev->stats.pub.tx.comp_histogram.pkts_41_50,
4319 pdev->stats.pub.tx.comp_histogram.pkts_51_60,
4320 pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
Nirav Shahda008342016-05-17 18:50:40 +05304321
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304322 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304323 "RX PATH Statistics:");
4324 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4325 "%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
Nirav Shahda008342016-05-17 18:50:40 +05304326 "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
4327 "msdus with frag_ind: %d msdus with offload_ind: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004328 pdev->stats.priv.rx.normal.ppdus,
4329 pdev->stats.priv.rx.normal.mpdus,
4330 pdev->stats.pub.rx.delivered.pkts,
4331 pdev->stats.pub.rx.delivered.bytes,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304332 pdev->stats.pub.rx.dropped_err.pkts,
4333 pdev->stats.pub.rx.dropped_err.bytes,
4334 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4335 pdev->stats.pub.rx.dropped_peer_invalid.bytes,
4336 pdev->stats.pub.rx.dropped_mic_err.pkts,
Nirav Shahda008342016-05-17 18:50:40 +05304337 pdev->stats.pub.rx.dropped_mic_err.bytes,
4338 pdev->stats.pub.rx.msdus_with_frag_ind,
4339 pdev->stats.pub.rx.msdus_with_offload_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004340
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304341 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004342 " fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
4343 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4344 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4345 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304346
4347 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05304348 "Rx packets per HTT message:\n"
Nirav Shah6a4eee62016-04-25 10:15:04 +05304349 "Single Packet %d\n"
4350 " 2-10 Packets %d\n"
4351 "11-20 Packets %d\n"
4352 "21-30 Packets %d\n"
4353 "31-40 Packets %d\n"
4354 "41-50 Packets %d\n"
4355 "51-60 Packets %d\n"
4356 " 60+ Packets %d\n",
4357 pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4358 pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4359 pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4360 pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4361 pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4362 pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4363 pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4364 pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004365
4366 ol_txrx_disp_peer_stats(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004367}
4368
4369void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4370{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304371 qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004372}
4373
4374#if defined(ENABLE_TXRX_PROT_ANALYZE)
4375
4376void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4377{
4378 ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4379 ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4380}
4381
4382#endif /* ENABLE_TXRX_PROT_ANALYZE */
4383
4384#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
4385int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4386{
4387 return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4388 OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4389}
4390#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4391
4392#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
4393A_STATUS
4394ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4395 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4396{
Anurag Chouhanc5548422016-02-24 18:33:27 +05304397 qdf_assert(pdev && peer && stats);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304398 qdf_spin_lock_bh(&pdev->peer_stat_mutex);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304399 qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304400 qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004401 return A_OK;
4402}
4403#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4404
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004405static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *pvdev, bool val)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004406{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004407 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004408 if (NULL == vdev)
4409 return;
4410
4411 vdev->disable_intrabss_fwd = val;
4412}
4413
Nirav Shahc657ef52016-07-26 14:22:38 +05304414/**
4415 * ol_txrx_update_mac_id() - update mac_id for vdev
4416 * @vdev_id: vdev id
4417 * @mac_id: mac id
4418 *
4419 * Return: none
4420 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004421static void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id)
Nirav Shahc657ef52016-07-26 14:22:38 +05304422{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004423 struct ol_txrx_vdev_t *vdev =
4424 (struct ol_txrx_vdev_t *)
4425 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Nirav Shahc657ef52016-07-26 14:22:38 +05304426
4427 if (NULL == vdev) {
4428 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4429 "%s: Invalid vdev_id %d", __func__, vdev_id);
4430 return;
4431 }
4432 vdev->mac_id = mac_id;
4433}
4434
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004435#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
4436
4437/**
4438 * ol_txrx_get_vdev_from_sta_id() - get vdev from sta_id
4439 * @sta_id: sta_id
4440 *
4441 * Return: vdev handle
4442 * NULL if not found.
4443 */
4444static ol_txrx_vdev_handle ol_txrx_get_vdev_from_sta_id(uint8_t sta_id)
4445{
4446 struct ol_txrx_peer_t *peer = NULL;
4447 ol_txrx_pdev_handle pdev = NULL;
4448
4449 if (sta_id >= WLAN_MAX_STA_COUNT) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304450 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304451 "Invalid sta id passed");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004452 return NULL;
4453 }
4454
Anurag Chouhan6d760662016-02-20 16:05:43 +05304455 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004456 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304457 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304458 "PDEV not found for sta_id [%d]", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004459 return NULL;
4460 }
4461
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004462 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004463
4464 if (!peer) {
Zhu Jianminf7ffe942017-08-24 10:24:15 +08004465 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304466 "PEER [%d] not found", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004467 return NULL;
4468 }
4469
4470 return peer->vdev;
4471}
4472
4473/**
4474 * ol_txrx_register_tx_flow_control() - register tx flow control callback
4475 * @vdev_id: vdev_id
4476 * @flowControl: flow control callback
4477 * @osif_fc_ctx: callback context
bings284f8be2017-08-11 10:41:30 +08004478 * @flow_control_is_pause: is vdev paused by flow control
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004479 *
4480 * Return: 0 for success or error code
4481 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004482static int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
bings284f8be2017-08-11 10:41:30 +08004483 ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
4484 ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004485{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004486 struct ol_txrx_vdev_t *vdev =
4487 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004488
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004489 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304490 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304491 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004492 return -EINVAL;
4493 }
4494
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304495 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004496 vdev->osif_flow_control_cb = flowControl;
bings284f8be2017-08-11 10:41:30 +08004497 vdev->osif_flow_control_is_pause = flow_control_is_pause;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004498 vdev->osif_fc_ctx = osif_fc_ctx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304499 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004500 return 0;
4501}
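
/*
 * Illustrative sketch (not part of the driver): how an OS shim might
 * register the legacy tx flow-control callbacks.  The callback
 * signatures are assumed to match the cdp typedefs (a resume callback
 * taking the osif context and a tx_resume flag, and a pause-query
 * callback returning bool); the example_* names are hypothetical.
 *
 *	static void example_flow_control_cb(void *osif_ctx, bool tx_resume)
 *	{
 *		// wake or stop the OS transmit queues for this vdev
 *	}
 *
 *	static bool example_flow_control_is_pause(void *osif_ctx)
 *	{
 *		return false;	// report whether the OS queues are paused
 *	}
 *
 *	...
 *	ol_txrx_register_tx_flow_control(vdev_id, example_flow_control_cb,
 *					 osif_ctx,
 *					 example_flow_control_is_pause);
 */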
4502
4503/**
Yun Parkeaea8632017-04-09 09:53:45 -07004504 * ol_txrx_deregister_tx_flow_control_cb() - deregister tx flow control
4505 * callback
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004506 * @vdev_id: vdev_id
4507 *
4508 * Return: 0 for success or error code
4509 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004510static int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004511{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004512 struct ol_txrx_vdev_t *vdev =
4513 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004514
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004515 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304516 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304517 "%s: Invalid vdev_id", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004518 return -EINVAL;
4519 }
4520
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304521 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004522 vdev->osif_flow_control_cb = NULL;
bings284f8be2017-08-11 10:41:30 +08004523 vdev->osif_flow_control_is_pause = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004524 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304525 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004526 return 0;
4527}
4528
4529/**
4530 * ol_txrx_get_tx_resource() - check whether free tx descriptors are above low_watermark
4531 * @sta_id: sta id
4532 * @low_watermark: low watermark
4533 * @high_watermark_offset: high watermark offset value
4534 *
4535 * Return: true/false
4536 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004537static bool
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004538ol_txrx_get_tx_resource(uint8_t sta_id,
4539 unsigned int low_watermark,
4540 unsigned int high_watermark_offset)
4541{
4542 ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004543
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004544 if (NULL == vdev) {
Zhu Jianminf7ffe942017-08-24 10:24:15 +08004545 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304546 "%s: Invalid sta_id %d", __func__, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004547		/* Return true so the caller does not conclude that the
4548		 * resource count is below low_watermark.
4549		 * sta_id validation is done in ol_tx_send_data_frame;
4550		 * if the sta_id is not registered, the host will drop
4551		 * the packet.
4552 */
4553 return true;
4554 }
4555
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304556 qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304557
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004558 if (vdev->pdev->tx_desc.num_free < (uint16_t) low_watermark) {
4559 vdev->tx_fl_lwm = (uint16_t) low_watermark;
4560 vdev->tx_fl_hwm =
4561 (uint16_t) (low_watermark + high_watermark_offset);
4562 /* Not enough free resource, stop TX OS Q */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05304563 qdf_atomic_set(&vdev->os_q_paused, 1);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304564 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004565 return false;
4566 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304567 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004568 return true;
4569}
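
/*
 * Illustrative sketch (not part of the driver): an OS shim can gate its
 * transmit path on ol_txrx_get_tx_resource().  The watermark values and
 * the EXAMPLE_* names are hypothetical.
 *
 *	#define EXAMPLE_TX_LOW_WM		100
 *	#define EXAMPLE_TX_HIGH_WM_OFFSET	 50
 *
 *	if (!ol_txrx_get_tx_resource(sta_id, EXAMPLE_TX_LOW_WM,
 *				     EXAMPLE_TX_HIGH_WM_OFFSET)) {
 *		// Fewer than EXAMPLE_TX_LOW_WM descriptors are free; the
 *		// txrx layer has marked the vdev OS queue paused, so stop
 *		// queuing until the flow-control resume callback fires.
 *	}
 */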
4570
4571/**
4572 * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
4573 * @vdev_id: vdev id
4574 * @pause_q_depth: pause queue depth
4575 *
4576 * Return: 0 for success or error code
4577 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004578static int
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004579ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
4580{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004581 struct ol_txrx_vdev_t *vdev =
4582 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004583
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004584 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304585 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304586 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004587 return -EINVAL;
4588 }
4589
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304590 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004591 vdev->ll_pause.max_q_depth = pause_q_depth;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304592 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004593
4594 return 0;
4595}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004596#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
4597
Leo Chang8e073612015-11-13 10:55:34 -08004598/**
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004599 * ol_txrx_display_stats() - display OL TXRX stats
 * @soc: soc handle
4600 * @value: module id for which stats need to be displayed
Nirav Shahda008342016-05-17 18:50:40 +05304601 *
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004602 * Return: status
Nirav Shahda008342016-05-17 18:50:40 +05304603 */
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004604static QDF_STATUS ol_txrx_display_stats(void *soc, uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004605{
4606 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004607 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004608
Anurag Chouhan6d760662016-02-20 16:05:43 +05304609 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004610 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304611 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304612 "%s: pdev is NULL", __func__);
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004613 return QDF_STATUS_E_NULL_VALUE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004614 }
4615
4616 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004617 case CDP_TXRX_PATH_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004618 ol_txrx_stats_display(pdev);
4619 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004620 case CDP_TXRX_TSO_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004621 ol_txrx_stats_display_tso(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004622 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004623 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07004624 ol_tx_dump_flow_pool_info((void *)pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004625 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004626 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304627 qdf_nbuf_tx_desc_count_display();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004628 break;
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -08004629 case CDP_WLAN_RX_BUF_DEBUG_STATS:
4630 htt_display_rx_buf_debug(pdev->htt_pdev);
4631 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304632#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004633 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304634 ol_tx_sched_cur_state_display(pdev);
4635 ol_tx_sched_stats_display(pdev);
4636 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004637 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304638 ol_tx_queue_log_display(pdev);
4639 break;
4640#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004641 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304642 ol_tx_dump_group_credit_stats(pdev);
4643 break;
4644#endif
4645
4646#ifdef DEBUG_HL_LOGGING
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004647	case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304648 htt_dump_bundle_stats(pdev->htt_pdev);
4649 break;
4650#endif
4651#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004652 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004653 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004654 break;
4655 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004656 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004657}
4658
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004659/**
4660 * ol_txrx_clear_stats() - Clear OL TXRX stats
4661 * @value: module id for which stats need to be cleared
4662 *
4663 * Return: None
4664 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004665static void ol_txrx_clear_stats(uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004666{
4667 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004668 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004669
Anurag Chouhan6d760662016-02-20 16:05:43 +05304670 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004671 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304672 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304673 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004674 return;
4675 }
4676
4677 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004678 case CDP_TXRX_PATH_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004679 ol_txrx_stats_clear(pdev);
4680 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004681 case CDP_DUMP_TX_FLOW_POOL_INFO:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004682 ol_tx_clear_flow_pool_stats();
4683 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004684 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304685 qdf_nbuf_tx_desc_count_clear();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004686 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304687#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004688 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304689 ol_tx_sched_stats_clear(pdev);
4690 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004691 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304692 ol_tx_queue_log_clear(pdev);
4693 break;
4694#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004695 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304696 ol_tx_clear_group_credit_stats(pdev);
4697 break;
4698#endif
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004699 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304700 htt_clear_bundle_stats(pdev->htt_pdev);
4701 break;
4702#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004703 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004704 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004705 break;
4706 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004707
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004708}
4709
4710/**
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004711 * ol_txrx_drop_nbuf_list() - drop an nbuf list
4712 * @buf_list: buffer list to be dropped
4713 *
4714 * Return: int (number of bufs dropped)
4715 */
4716static inline int ol_txrx_drop_nbuf_list(qdf_nbuf_t buf_list)
4717{
4718 int num_dropped = 0;
4719 qdf_nbuf_t buf, next_buf;
4720 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
4721
4722 buf = buf_list;
4723 while (buf) {
Himanshu Agarwaldd2196a2017-07-31 11:38:14 +05304724 QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004725 next_buf = qdf_nbuf_queue_next(buf);
4726 if (pdev)
4727 TXRX_STATS_MSDU_INCR(pdev,
4728 rx.dropped_peer_invalid, buf);
4729 qdf_nbuf_free(buf);
4730 buf = next_buf;
4731 num_dropped++;
4732 }
4733 return num_dropped;
4734}
4735
4736/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004737 * ol_rx_data_cb() - data rx callback
4738 * @pdev: txrx pdev handle
4739 * @buf_list: buffer list
Nirav Shah36a87bf2016-02-22 12:38:46 +05304740 * @staid: Station id
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004741 *
4742 * Return: None
4743 */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304744static void ol_rx_data_cb(struct ol_txrx_pdev_t *pdev,
4745 qdf_nbuf_t buf_list, uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004746{
Mohit Khanna0696eef2016-04-14 16:14:08 -07004747 void *osif_dev;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004748 uint8_t drop_count = 0;
Nirav Shahcbc6d722016-03-01 16:24:53 +05304749 qdf_nbuf_t buf, next_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304750 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004751 ol_txrx_rx_fp data_rx = NULL;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304752 struct ol_txrx_peer_t *peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004753
Jeff Johnsondac9e382017-09-24 10:36:08 -07004754 if (qdf_unlikely(!pdev))
Nirav Shah36a87bf2016-02-22 12:38:46 +05304755 goto free_buf;
4756
4757 /* Do not use peer directly. Derive peer from staid to
4758 * make sure that peer is valid.
4759 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004760 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, staid);
Nirav Shah36a87bf2016-02-22 12:38:46 +05304761 if (!peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004762 goto free_buf;
4763
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304764 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre50141c52016-04-07 13:15:29 -07004765 if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
4766 !peer->vdev->rx)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304767 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004768 goto free_buf;
4769 }
Dhanashri Atre182b0272016-02-17 15:35:07 -08004770
4771 data_rx = peer->vdev->rx;
Mohit Khanna0696eef2016-04-14 16:14:08 -07004772 osif_dev = peer->vdev->osif_dev;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304773 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004774
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004775 qdf_spin_lock_bh(&peer->bufq_info.bufq_lock);
4776 if (!list_empty(&peer->bufq_info.cached_bufq)) {
4777 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004778 /* Flush the cached frames to HDD before passing new rx frame */
4779 ol_txrx_flush_rx_frames(peer, 0);
4780 } else
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004781 qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004782
4783 buf = buf_list;
4784 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304785 next_buf = qdf_nbuf_queue_next(buf);
4786 qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
Mohit Khanna0696eef2016-04-14 16:14:08 -07004787 ret = data_rx(osif_dev, buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304788 if (ret != QDF_STATUS_SUCCESS) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304789 ol_txrx_err("Frame Rx to HDD failed");
Nirav Shah6a4eee62016-04-25 10:15:04 +05304790 if (pdev)
4791 TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304792 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004793 }
4794 buf = next_buf;
4795 }
4796 return;
4797
4798free_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004799 drop_count = ol_txrx_drop_nbuf_list(buf_list);
4800 ol_txrx_warn("%s:Dropped frames %u", __func__, drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004801}
4802
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004803/* print for every 16th packet */
4804#define OL_TXRX_PRINT_RATE_LIMIT_THRESH 0x0f
4805struct ol_rx_cached_buf *cache_buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304806
4807/** helper function to drop packets
4808 * Note: caller must hold the cached bufq lock before invoking
4809 * this function. Also, it assumes that the pointers passed in
4810 * are valid (non-NULL)
4811 */
4812static inline void ol_txrx_drop_frames(
4813 struct ol_txrx_cached_bufq_t *bufqi,
4814 qdf_nbuf_t rx_buf_list)
4815{
4816 uint32_t dropped = ol_txrx_drop_nbuf_list(rx_buf_list);
4817 bufqi->dropped += dropped;
4818 bufqi->qdepth_no_thresh += dropped;
4819
4820 if (bufqi->qdepth_no_thresh > bufqi->high_water_mark)
4821 bufqi->high_water_mark = bufqi->qdepth_no_thresh;
4822}
4823
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004824static QDF_STATUS ol_txrx_enqueue_rx_frames(
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304825 struct ol_txrx_peer_t *peer,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004826 struct ol_txrx_cached_bufq_t *bufqi,
4827 qdf_nbuf_t rx_buf_list)
4828{
4829 struct ol_rx_cached_buf *cache_buf;
4830 qdf_nbuf_t buf, next_buf;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004831 static uint32_t count;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004832
4833 if ((count++ & OL_TXRX_PRINT_RATE_LIMIT_THRESH) == 0)
4834 ol_txrx_info_high(
4835 "Data on the peer before it is registered bufq->curr %d bufq->drops %d",
4836 bufqi->curr, bufqi->dropped);
4837
4838 qdf_spin_lock_bh(&bufqi->bufq_lock);
4839 if (bufqi->curr >= bufqi->thresh) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304840 ol_txrx_drop_frames(bufqi, rx_buf_list);
4841 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4842 return QDF_STATUS_E_FAULT;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004843 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004844 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4845
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004846 buf = rx_buf_list;
4847 while (buf) {
4848 next_buf = qdf_nbuf_queue_next(buf);
4849 cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
4850 if (!cache_buf) {
4851 ol_txrx_err(
4852 "Failed to allocate buf to cache the rx frames");
4853 qdf_nbuf_free(buf);
4854 } else {
4855 /* Add NULL terminator */
4856 qdf_nbuf_set_next(buf, NULL);
4857 cache_buf->buf = buf;
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304858 if (peer && peer->valid) {
4859 qdf_spin_lock_bh(&bufqi->bufq_lock);
4860 list_add_tail(&cache_buf->list,
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004861 &bufqi->cached_bufq);
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304862 bufqi->curr++;
4863 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4864 } else {
4865 qdf_mem_free(cache_buf);
4866 rx_buf_list = buf;
4867 qdf_nbuf_set_next(rx_buf_list, next_buf);
4868 qdf_spin_lock_bh(&bufqi->bufq_lock);
4869 ol_txrx_drop_frames(bufqi, rx_buf_list);
4870 qdf_spin_unlock_bh(&bufqi->bufq_lock);
4871 return QDF_STATUS_E_FAULT;
4872 }
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004873 }
4874 buf = next_buf;
4875 }
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304876 return QDF_STATUS_SUCCESS;
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004877}
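
/*
 * Note: frames queued by ol_txrx_enqueue_rx_frames() sit on
 * peer->bufq_info.cached_bufq until the peer is registered for the data
 * path; ol_txrx_register_peer() below calls ol_txrx_flush_rx_frames()
 * so the cached frames are handed up once the rx callback is available.
 */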
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004878/**
4879 * ol_rx_data_process() - process rx frame
4880 * @peer: peer
4881 * @rx_buf_list: rx buffer list
4882 *
4883 * Return: None
4884 */
4885void ol_rx_data_process(struct ol_txrx_peer_t *peer,
Nirav Shahcbc6d722016-03-01 16:24:53 +05304886 qdf_nbuf_t rx_buf_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004887{
Yun Parkeaea8632017-04-09 09:53:45 -07004888 /*
4889	 * Firmware data path active responses are handled via the shim RX
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004890	 * thread: the T2H message handler runs in SIRQ context, and IPA
Yun Parkeaea8632017-04-09 09:53:45 -07004891	 * kernel module APIs must not be called from SIRQ context.
4892 */
Dhanashri Atre182b0272016-02-17 15:35:07 -08004893 ol_txrx_rx_fp data_rx = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304894 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004895 uint8_t drop_count;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004896
4897 if ((!peer) || (!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304898 ol_txrx_err("peer/pdev is NULL");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004899 goto drop_rx_buf;
4900 }
4901
Dhanashri Atre182b0272016-02-17 15:35:07 -08004902 qdf_assert(peer->vdev);
4903
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304904 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004905 if (peer->state >= OL_TXRX_PEER_STATE_CONN)
Dhanashri Atre182b0272016-02-17 15:35:07 -08004906 data_rx = peer->vdev->rx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304907 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004908
4909 /*
4910 * If there is a data frame from peer before the peer is
4911 * registered for data service, enqueue them on to pending queue
4912 * which will be flushed to HDD once that station is registered.
4913 */
4914 if (!data_rx) {
Himanshu Agarwald8cffb32017-04-27 15:41:29 +05304915 if (ol_txrx_enqueue_rx_frames(peer, &peer->bufq_info,
4916 rx_buf_list)
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004917 != QDF_STATUS_SUCCESS)
Poddar, Siddarth07eebf32017-04-19 12:40:26 +05304918 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4919 "%s: failed to enqueue rx frm to cached_bufq",
4920 __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004921 } else {
4922#ifdef QCA_CONFIG_SMP
4923 /*
4924 * If the kernel is SMP, schedule rx thread to
4925 * better use multicores.
4926 */
4927 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
Nirav Shah36a87bf2016-02-22 12:38:46 +05304928 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004929 } else {
4930 p_cds_sched_context sched_ctx =
4931 get_cds_sched_ctxt();
4932 struct cds_ol_rx_pkt *pkt;
4933
4934 if (unlikely(!sched_ctx))
4935 goto drop_rx_buf;
4936
4937 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
4938 if (!pkt) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05304939 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304940 "No available Rx message buffer");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004941 goto drop_rx_buf;
4942 }
4943 pkt->callback = (cds_ol_rx_thread_cb)
4944 ol_rx_data_cb;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304945 pkt->context = (void *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004946 pkt->Rxpkt = (void *)rx_buf_list;
4947 pkt->staId = peer->local_id;
4948 cds_indicate_rxpkt(sched_ctx, pkt);
4949 }
4950#else /* QCA_CONFIG_SMP */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304951 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004952#endif /* QCA_CONFIG_SMP */
4953 }
4954
4955 return;
4956
4957drop_rx_buf:
Mohit Khanna78cb6bb2017-03-31 17:05:14 -07004958 drop_count = ol_txrx_drop_nbuf_list(rx_buf_list);
4959 ol_txrx_info_high("Dropped rx packets %u", drop_count);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004960}
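
/*
 * Illustrative sketch (not part of the driver): a shim RX indication handler
 * could resolve the peer from the station ID carried in the indication and
 * hand the whole nbuf list to ol_rx_data_process(). The handler name and the
 * origin of sta_id are hypothetical; the calls use signatures from this file,
 * and ol_rx_data_process() itself drops the list if the peer is not found.
 *
 *	static void example_rx_ind(uint8_t sta_id, qdf_nbuf_t rx_buf_list)
 *	{
 *		ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 *		struct ol_txrx_peer_t *peer;
 *
 *		if (!pdev)
 *			return;
 *
 *		peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
 *						     sta_id);
 *		ol_rx_data_process(peer, rx_buf_list);
 *	}
 */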
4961
4962/**
4963 * ol_txrx_register_peer() - register peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004964 * @sta_desc: sta descriptor
4965 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05304966 * Return: QDF Status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004967 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004968static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004969{
4970 struct ol_txrx_peer_t *peer;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304971 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004972 union ol_txrx_peer_update_param_t param;
4973 struct privacy_exemption privacy_filter;
4974
4975 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304976 ol_txrx_err("Pdev is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304977 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004978 }
4979
4980 if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304981 ol_txrx_err("Invalid sta id :%d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004982 sta_desc->sta_id);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304983 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004984 }
4985
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004986 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
4987 sta_desc->sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004988 if (!peer)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304989 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004990
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304991 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004992 peer->state = OL_TXRX_PEER_STATE_CONN;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304993 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004994
4995 param.qos_capable = sta_desc->is_qos_enabled;
4996 ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
4997 ol_txrx_peer_update_qos_capable);
4998
4999 if (sta_desc->is_wapi_supported) {
5000 /*Privacy filter to accept unencrypted WAI frames */
5001 privacy_filter.ether_type = ETHERTYPE_WAI;
5002 privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
5003 privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
5004 ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
5005 }
5006
5007 ol_txrx_flush_rx_frames(peer, 0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305008 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005009}
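
/*
 * Illustrative sketch (not part of the driver): how the control path might
 * fill a station descriptor and register a peer for the data path. The
 * assigned_sta_id value and the qos/wapi flags are hypothetical inputs; the
 * descriptor fields and the call match ol_txrx_register_peer() above, and
 * sta_id must be below WLAN_MAX_STA_COUNT.
 *
 *	struct ol_txrx_desc_type sta_desc = { 0 };
 *
 *	sta_desc.sta_id = assigned_sta_id;
 *	sta_desc.is_qos_enabled = true;
 *	sta_desc.is_wapi_supported = false;
 *	if (ol_txrx_register_peer(&sta_desc) != QDF_STATUS_SUCCESS)
 *		ol_txrx_err("peer registration failed");
 */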
5010
5011/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005012 * ol_txrx_register_ocb_peer - Function to register the OCB peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005013 * @mac_addr: MAC address of the self peer
5014 * @peer_id: Pointer to the peer ID
5015 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305016 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005017 */
Jeff Johnson382bce02017-09-01 14:21:07 -07005018static QDF_STATUS ol_txrx_register_ocb_peer(uint8_t *mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005019 uint8_t *peer_id)
5020{
5021 ol_txrx_pdev_handle pdev;
5022 ol_txrx_peer_handle peer;
5023
Anurag Chouhan6d760662016-02-20 16:05:43 +05305024 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005025 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305026 ol_txrx_err("%s: Unable to find pdev!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005027 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305028 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005029 }
5030
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005031 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
5032 mac_addr, peer_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005033 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305034 ol_txrx_err("%s: Unable to find OCB peer!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005035 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305036 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005037 }
5038
5039 ol_txrx_set_ocb_peer(pdev, peer);
5040
5041 /* Set peer state to connected */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005042 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08005043 OL_TXRX_PEER_STATE_AUTH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005044
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305045 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005046}
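
/*
 * Illustrative sketch (not part of the driver): registering the OCB self
 * peer once it has been created. self_mac is a hypothetical buffer holding
 * the self MAC address; the call and the returned peer_id follow
 * ol_txrx_register_ocb_peer() above.
 *
 *	uint8_t self_mac[QDF_MAC_ADDR_SIZE];
 *	uint8_t peer_id;
 *
 *	if (ol_txrx_register_ocb_peer(self_mac, &peer_id) !=
 *	    QDF_STATUS_SUCCESS)
 *		ol_txrx_err("OCB peer registration failed");
 */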
5047
5048/**
5049 * ol_txrx_set_ocb_peer - Function to store the OCB peer
5050 * @pdev: Handle to the HTT instance
5051 * @peer: Pointer to the peer
5052 */
5053void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
5054 struct ol_txrx_peer_t *peer)
5055{
5056 if (pdev == NULL)
5057 return;
5058
5059 pdev->ocb_peer = peer;
5060 pdev->ocb_peer_valid = (NULL != peer);
5061}
5062
5063/**
5064 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
5065 * @pdev: Handle to the HTT instance
5066 * @peer: Pointer to the returned peer
5067 *
5068 * Return: true if the peer is valid, false if not
5069 */
5070bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
5071 struct ol_txrx_peer_t **peer)
5072{
5073 int rc;
5074
5075 if ((pdev == NULL) || (peer == NULL)) {
5076 rc = false;
5077 goto exit;
5078 }
5079
5080 if (pdev->ocb_peer_valid) {
5081 *peer = pdev->ocb_peer;
5082 rc = true;
5083 } else {
5084 rc = false;
5085 }
5086
5087exit:
5088 return rc;
5089}
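
/*
 * Illustrative sketch (not part of the driver): the validity-checked
 * retrieval pattern expected with ol_txrx_get_ocb_peer(); the state update
 * mirrors the one done in ol_txrx_register_ocb_peer() above.
 *
 *	struct ol_txrx_peer_t *ocb_peer = NULL;
 *
 *	if (ol_txrx_get_ocb_peer(pdev, &ocb_peer))
 *		ol_txrx_peer_state_update((struct cdp_pdev *)pdev,
 *					  ocb_peer->mac_addr.raw,
 *					  OL_TXRX_PEER_STATE_AUTH);
 *	else
 *		ol_txrx_err("no valid OCB peer");
 */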
5090
5091#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5092/**
5093 * ol_txrx_register_pause_cb() - register pause callback
 * @soc: soc handle
5094 * @pause_cb: pause callback
5095 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05305096 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005097 */
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07005098static QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
5099 tx_pause_callback pause_cb)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005100{
Anurag Chouhan6d760662016-02-20 16:05:43 +05305101 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Yun Parkeaea8632017-04-09 09:53:45 -07005102
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005103 if (!pdev || !pause_cb) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05305104 ol_txrx_err("pdev or pause_cb is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305105 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005106 }
5107 pdev->pause_cb = pause_cb;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05305108 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005109}
5110#endif
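
/*
 * Illustrative sketch (not part of the driver): one-time registration of the
 * netif pause callback used by the tx flow pools. example_pause_cb is a
 * hypothetical function assumed to match the tx_pause_callback prototype
 * from the CDP flow-control header; only the registration call is taken from
 * this file.
 *
 *	QDF_STATUS status;
 *
 *	status = ol_txrx_register_pause_cb(soc, example_pause_cb);
 *	if (status != QDF_STATUS_SUCCESS)
 *		ol_txrx_err("failed to register pause callback");
 */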
5111
Poddar, Siddarth34872782017-08-10 14:08:51 +05305112/**
5113 * ol_register_data_stall_detect_cb() - register data stall callback
5114 * @data_stall_detect_callback: data stall callback function
5115 *
5117 * Return: QDF_STATUS Enumeration
5118 */
5119static QDF_STATUS ol_register_data_stall_detect_cb(
5120 data_stall_detect_cb data_stall_detect_callback)
5121{
5122 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5123
5124 if (pdev == NULL) {
5125 ol_txrx_err("%s: pdev NULL!", __func__);
5126 return QDF_STATUS_E_INVAL;
5127 }
5128 pdev->data_stall_detect_callback = data_stall_detect_callback;
5129 return QDF_STATUS_SUCCESS;
5130}
5131
5132/**
5133 * ol_deregister_data_stall_detect_cb() - de-register data stall callback
5134 * @data_stall_detect_callback: data stall callback function
5135 *
5137 * Return: QDF_STATUS Enumeration
5138 */
5139static QDF_STATUS ol_deregister_data_stall_detect_cb(
5140 data_stall_detect_cb data_stall_detect_callback)
5141{
5142 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5143
5144 if (pdev == NULL) {
5145 ol_txrx_err("%s: pdev NULL!", __func__);
5146 return QDF_STATUS_E_INVAL;
5147 }
5148 pdev->data_stall_detect_callback = NULL;
5149 return QDF_STATUS_SUCCESS;
5150}
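
/*
 * Illustrative sketch (not part of the driver): pairing the data stall
 * callback registration with its de-registration on teardown. In practice
 * these entry points are reached through the cdp misc_ops table registered
 * below; example_data_stall_cb is hypothetical and is assumed to match the
 * data_stall_detect_cb type.
 *
 *	ol_register_data_stall_detect_cb(example_data_stall_cb);
 *	...
 *	ol_deregister_data_stall_detect_cb(example_data_stall_cb);
 */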
5151
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305152void
5153ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
5154{
Jeff Johnsonc13bfe02017-09-18 08:16:17 -07005155 qdf_print("%s: Pkt: VA 0x%pK PA 0x%llx len %d\n", __func__,
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305156 qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
5157 print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
5158 qdf_nbuf_data(nbuf), len, true);
5159}
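
/*
 * Illustrative sketch (not part of the driver): dumping the first bytes of a
 * suspect frame while debugging. The nbuf, its physical address and the dump
 * length are hypothetical values owned by the caller; the call matches
 * ol_txrx_dump_pkt() above.
 *
 *	ol_txrx_dump_pkt(nbuf, (uint32_t)nbuf_paddr, 64);
 */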
5160
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305161#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5162bool
5163ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
5164{
5165	struct ol_tx_flow_pool_t *pool;
5166 bool enough_desc_flag;
5167
5168 if (!vdev)
Yun Parkff5da562017-01-18 14:44:20 -08005169 return false;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305170
5171 pool = vdev->pool;
5172
Yun Parkff5da562017-01-18 14:44:20 -08005173 if (!pool)
5174 return false;
5175
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305176 qdf_spin_lock_bh(&pool->flow_pool_lock);
5177 enough_desc_flag = (pool->avail_desc < (pool->stop_th +
Yun Parkff5da562017-01-18 14:44:20 -08005178 OL_TX_NON_FWD_RESERVE))
5179 ? false : true;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305180 qdf_spin_unlock_bh(&pool->flow_pool_lock);
5181 return enough_desc_flag;
5182}
5183#else
5184bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
5185{
5186 return true;
5187}
5188#endif
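
/*
 * Illustrative sketch (not part of the driver): the intra-BSS forwarding
 * path is expected to consult this check before queuing a received frame for
 * transmission, so forwarding cannot exhaust the tx descriptors reserved for
 * frames from the host stack. The two helper names are hypothetical.
 *
 *	if (ol_txrx_fwd_desc_thresh_check(vdev))
 *		example_fwd_frame_to_tx(vdev, msdu);
 *	else
 *		example_deliver_to_stack_only(vdev, msdu);
 */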
5189
Dhanashri Atre12a08392016-02-17 13:10:34 -08005190/**
5191 * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
5192 * @vdev_id: vdev_id
5193 *
5194 * Return: vdev handle
5195 * NULL if not found.
5196 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005197struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
Dhanashri Atre12a08392016-02-17 13:10:34 -08005198{
5199 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5200 ol_txrx_vdev_handle vdev = NULL;
5201
5202 if (qdf_unlikely(!pdev))
5203 return NULL;
5204
5205 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5206 if (vdev->vdev_id == vdev_id)
5207 break;
5208 }
5209
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005210 return (struct cdp_vdev *)vdev;
Dhanashri Atre12a08392016-02-17 13:10:34 -08005211}
Nirav Shah2e583a02016-04-30 14:06:12 +05305212
5213/**
5214 * ol_txrx_set_wisa_mode() - set wisa mode
5215 * @vdev: vdev handle
5216 * @enable: enable flag
5217 *
5218 * Return: QDF STATUS
5219 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005220static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_vdev *pvdev, bool enable)
Nirav Shah2e583a02016-04-30 14:06:12 +05305221{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005222 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Leo Chang98726762016-10-28 11:07:18 -07005223
Nirav Shah2e583a02016-04-30 14:06:12 +05305224 if (!vdev)
5225 return QDF_STATUS_E_INVAL;
5226
5227 vdev->is_wisa_mode_enable = enable;
5228 return QDF_STATUS_SUCCESS;
5229}
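
/*
 * Illustrative sketch (not part of the driver): resolving a vdev from its ID
 * and enabling WISA mode on it, using the two functions above. vdev_id is a
 * hypothetical caller-owned value.
 *
 *	struct cdp_vdev *pvdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
 *
 *	if (!pvdev || ol_txrx_set_wisa_mode(pvdev, true) != QDF_STATUS_SUCCESS)
 *		ol_txrx_err("failed to enable WISA mode for vdev %d", vdev_id);
 */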
Leo Chang98726762016-10-28 11:07:18 -07005230
5231/**
5232 * ol_txrx_get_vdev_id() - get interface id from interface context
5233 * @pvdev: vdev handle
5234 *
5235 * Return: virtual interface id
5236 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005237static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
Leo Chang98726762016-10-28 11:07:18 -07005238{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005239 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Leo Chang98726762016-10-28 11:07:18 -07005240 return vdev->vdev_id;
5241}
5242
5243/**
5244 * ol_txrx_last_assoc_received() - get time of last assoc received
5245 * @ppeer: peer handle
5246 *
5247 * Return: pointer of the time of last assoc received
5248 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005249static qdf_time_t *ol_txrx_last_assoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005250{
5251 ol_txrx_peer_handle peer = ppeer;
5252
5253 return &peer->last_assoc_rcvd;
5254}
5255
5256/**
5257 * ol_txrx_last_disassoc_received() - get time of last disassoc received
5258 * @ppeer: peer handle
5259 *
5260 * Return: pointer of the time of last disassoc received
5261 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005262static qdf_time_t *ol_txrx_last_disassoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005263{
5264 ol_txrx_peer_handle peer = ppeer;
5265
5266 return &peer->last_disassoc_rcvd;
5267}
5268
5269/**
5270 * ol_txrx_last_deauth_received() - get time of last deauth received
5271 * @ppeer: peer handle
5272 *
5273 * Return: pointer of the time of last deauth received
5274 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005275static qdf_time_t *ol_txrx_last_deauth_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005276{
5277 ol_txrx_peer_handle peer = ppeer;
5278
5279 return &peer->last_deauth_rcvd;
5280}
5281
5282/**
5283 * ol_txrx_soc_attach_target() - attach soc target
5284 * @soc: soc handle
5285 *
5286 * MCL legacy OL does nothing here
5287 *
5288 * Return: 0
5289 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005290static int ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
Leo Chang98726762016-10-28 11:07:18 -07005291{
5292	/* MCL legacy OL does nothing here */
5293 return 0;
5294}
5295
5296/**
5297 * ol_txrx_soc_detach() - detach soc target
5298 * @soc: soc handle
5299 *
5300 * MCL legacy OL does nothing here
5301 *
5302 * Return: none
5303 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005304static void ol_txrx_soc_detach(void *soc)
Leo Chang98726762016-10-28 11:07:18 -07005305{
Venkata Sharath Chandra Manchala0c2eece2017-03-09 17:30:52 -08005306 qdf_mem_free(soc);
Leo Chang98726762016-10-28 11:07:18 -07005307}
5308
5309/**
5310 * ol_txrx_pkt_log_con_service() - connect packet log service
5311 * @ppdev: physical device handle
5312 * @scn: device context
5313 *
5314 * Return: none
5315 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005316static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
Leo Chang98726762016-10-28 11:07:18 -07005317{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005318 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07005319
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005320 htt_pkt_log_init((struct cdp_pdev *)pdev, scn);
Leo Chang98726762016-10-28 11:07:18 -07005321 pktlog_htc_attach();
5322}
5323
5324/* OL wrapper functions for CDP abstraction */
5325/**
5326 * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
5327 * @peer: peer handle
5328 * @drop: rx packets drop or deliver
5329 *
5330 * Return: none
5331 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005332static void ol_txrx_wrapper_flush_rx_frames(void *peer, bool drop)
Leo Chang98726762016-10-28 11:07:18 -07005333{
5334 ol_txrx_flush_rx_frames((ol_txrx_peer_handle)peer, drop);
5335}
5336
5337/**
5338 * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
5339 * @ppdev: pdev handle
5340 * @vdev_id: interface id
5341 *
5342 * Return: virtual interface instance
5343 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005344static
5345struct cdp_vdev *ol_txrx_wrapper_get_vdev_from_vdev_id(struct cdp_pdev *ppdev,
5346 uint8_t vdev_id)
Leo Chang98726762016-10-28 11:07:18 -07005347{
5348 return ol_txrx_get_vdev_from_vdev_id(vdev_id);
5349}
5350
5351/**
5352 * ol_txrx_wrapper_register_peer() - register peer
5353 * @pdev: pdev handle
5354 * @sta_desc: peer description
5355 *
5356 * Return: QDF STATUS
5357 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005358static QDF_STATUS ol_txrx_wrapper_register_peer(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005359 struct ol_txrx_desc_type *sta_desc)
5360{
5361 return ol_txrx_register_peer(sta_desc);
5362}
5363
5364/**
5365 * ol_txrx_wrapper_peer_find_by_local_id() - Find a txrx peer handle
5366 * @pdev: the data physical device object
5367 * @local_peer_id: the ID txrx assigned locally to the peer in question
5368 *
5369 * The control SW typically uses the txrx peer handle to refer to the peer.
5370 * In unusual circumstances, if it is infeasible for the control SW to
5371 * maintain the txrx peer handle but it can maintain a small integer local
5372 * peer ID, this function allows the peer handle to be retrieved, based on
5373 * the local peer ID.
5374 *
5375 * Return: handle to the txrx peer object
5376 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005377static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005378ol_txrx_wrapper_peer_find_by_local_id(struct cdp_pdev *pdev,
5379 uint8_t local_peer_id)
Leo Chang98726762016-10-28 11:07:18 -07005380{
5381 return (void *)ol_txrx_peer_find_by_local_id(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005382 pdev, local_peer_id);
Leo Chang98726762016-10-28 11:07:18 -07005383}
5384
5385/**
5386 * ol_txrx_wrapper_cfg_is_high_latency() - device is high or low latency device
5387 * @pdev: pdev handle
5388 *
5389 * Return: 1 high latency bus
5390 * 0 low latency bus
5391 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005392static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
Leo Chang98726762016-10-28 11:07:18 -07005393{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005394 return ol_cfg_is_high_latency(cfg_pdev);
Leo Chang98726762016-10-28 11:07:18 -07005395}
5396
5397/**
5398 * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
5399 * @pdev: pdev handle
 * @peer_mac: MAC address of the peer whose state has changed
5400 * @state: the new state of the peer
5401 *
5402 * Specify the peer's authentication state (none, connected, authenticated)
5403 * to allow the data SW to determine whether to filter out invalid data frames.
5404 * (In the "connected" state, where security is enabled, but authentication
5405 * has not completed, tx and rx data frames other than EAPOL or WAPI should
5406 * be discarded.)
5407 * This function is only relevant for systems in which the tx and rx filtering
5408 * are done in the host rather than in the target.
5409 *
5410 * Return: QDF Status
5411 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005412static QDF_STATUS ol_txrx_wrapper_peer_state_update(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005413 uint8_t *peer_mac, enum ol_txrx_peer_state state)
5414{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005415 return ol_txrx_peer_state_update(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005416 peer_mac, state);
5417}
5418
5419/**
5420 * ol_txrx_wrapper_find_peer_by_addr() - find peer instance by address
5421 * @pdev: pdev handle
5422 * @peer_addr: peer address to find
5423 * @peer_id: peer id
5424 *
5425 * Return: peer instance pointer
5426 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005427static void *ol_txrx_wrapper_find_peer_by_addr(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005428 uint8_t *peer_addr, uint8_t *peer_id)
5429{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005430 return ol_txrx_find_peer_by_addr(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005431 peer_addr, peer_id);
5432}
5433
5434/**
5435 * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
5436 * @cfg_ctx: cfg context
5437 * @cfg_param: cfg parameters
5438 *
5439 * Return: none
5440 */
Jeff Johnsonffa9afc2016-12-19 15:34:41 -08005441static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005442ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
5443 void *cfg_param)
Leo Chang98726762016-10-28 11:07:18 -07005444{
5445 return ol_tx_set_flow_control_parameters(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005446 cfg_pdev,
Leo Chang98726762016-10-28 11:07:18 -07005447 (struct txrx_pdev_cfg_param_t *)cfg_param);
5448}
5449
5450static struct cdp_cmn_ops ol_ops_cmn = {
5451 .txrx_soc_attach_target = ol_txrx_soc_attach_target,
5452 .txrx_vdev_attach = ol_txrx_vdev_attach,
5453 .txrx_vdev_detach = ol_txrx_vdev_detach,
5454 .txrx_pdev_attach = ol_txrx_pdev_attach,
5455 .txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
5456 .txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05305457 .txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
Leo Chang98726762016-10-28 11:07:18 -07005458 .txrx_pdev_detach = ol_txrx_pdev_detach,
Dhanashri Atre272fd232016-11-10 16:20:46 -08005459 .txrx_peer_create = ol_txrx_peer_attach,
5460 .txrx_peer_setup = NULL,
5461 .txrx_peer_teardown = NULL,
5462 .txrx_peer_delete = ol_txrx_peer_detach,
Leo Chang98726762016-10-28 11:07:18 -07005463 .txrx_vdev_register = ol_txrx_vdev_register,
5464 .txrx_soc_detach = ol_txrx_soc_detach,
5465 .txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
5466 .txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
5467 .txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
Krishna Kumaar Natarajan5fb9ac12016-12-06 14:28:35 -08005468 .txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
Leo Chang98726762016-10-28 11:07:18 -07005469 .txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
5470 .txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
5471 .txrx_get_tx_pending = ol_txrx_get_tx_pending,
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07005472 .flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005473 .txrx_fw_stats_get = ol_txrx_fw_stats_get,
5474 .display_stats = ol_txrx_display_stats,
Leo Chang98726762016-10-28 11:07:18 -07005475 /* TODO: Add other functions */
5476};
5477
5478static struct cdp_misc_ops ol_ops_misc = {
5479 .set_ibss_vdev_heart_beat_timer =
5480 ol_txrx_set_ibss_vdev_heart_beat_timer,
5481#ifdef CONFIG_HL_SUPPORT
5482 .set_wmm_param = ol_txrx_set_wmm_param,
5483#endif /* CONFIG_HL_SUPPORT */
5484 .bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
5485 .bad_peer_txctl_update_threshold =
5486 ol_txrx_bad_peer_txctl_update_threshold,
5487 .hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
5488 .tx_non_std = ol_tx_non_std,
5489 .get_vdev_id = ol_txrx_get_vdev_id,
5490 .set_wisa_mode = ol_txrx_set_wisa_mode,
Poddar, Siddarth34872782017-08-10 14:08:51 +05305491 .txrx_data_stall_cb_register = ol_register_data_stall_detect_cb,
5492 .txrx_data_stall_cb_deregister = ol_deregister_data_stall_detect_cb,
Leo Chang98726762016-10-28 11:07:18 -07005493#ifdef FEATURE_RUNTIME_PM
5494 .runtime_suspend = ol_txrx_runtime_suspend,
5495 .runtime_resume = ol_txrx_runtime_resume,
5496#endif /* FEATURE_RUNTIME_PM */
5497 .get_opmode = ol_txrx_get_opmode,
5498 .mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
5499 .update_mac_id = ol_txrx_update_mac_id,
5500 .flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
5501 .get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
5502 .pkt_log_init = htt_pkt_log_init,
5503 .pkt_log_con_service = ol_txrx_pkt_log_con_service
5504};
5505
5506static struct cdp_flowctl_ops ol_ops_flowctl = {
5507#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5508 .register_pause_cb = ol_txrx_register_pause_cb,
5509 .set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
Manjunathappa Prakash6c547362017-03-30 20:11:47 -07005510 .dump_flow_pool_info = ol_tx_dump_flow_pool_info,
Leo Chang98726762016-10-28 11:07:18 -07005511#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
5512};
5513
5514static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
5515#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
5516 .register_tx_flow_control = ol_txrx_register_tx_flow_control,
5517 .deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
5518 .flow_control_cb = ol_txrx_flow_control_cb,
5519 .get_tx_resource = ol_txrx_get_tx_resource,
5520 .ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
5521 .vdev_flush = ol_txrx_vdev_flush,
5522 .vdev_pause = ol_txrx_vdev_pause,
5523 .vdev_unpause = ol_txrx_vdev_unpause
5524#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
5525};
5526
Leo Chang98726762016-10-28 11:07:18 -07005527#ifdef IPA_OFFLOAD
Yun Parkb4f591d2017-03-29 15:51:01 -07005528static struct cdp_ipa_ops ol_ops_ipa = {
Leo Chang98726762016-10-28 11:07:18 -07005529 .ipa_get_resource = ol_txrx_ipa_uc_get_resource,
5530 .ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
5531 .ipa_set_active = ol_txrx_ipa_uc_set_active,
5532 .ipa_op_response = ol_txrx_ipa_uc_op_response,
5533 .ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
5534 .ipa_get_stat = ol_txrx_ipa_uc_get_stat,
5535 .ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
Yun Park637d6482016-10-05 10:51:33 -07005536 .ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base,
Yun Parkb4f591d2017-03-29 15:51:01 -07005537 .ipa_enable_autonomy = ol_txrx_ipa_enable_autonomy,
5538 .ipa_disable_autonomy = ol_txrx_ipa_disable_autonomy,
5539 .ipa_setup = ol_txrx_ipa_setup,
5540 .ipa_cleanup = ol_txrx_ipa_cleanup,
5541 .ipa_setup_iface = ol_txrx_ipa_setup_iface,
5542 .ipa_cleanup_iface = ol_txrx_ipa_cleanup_iface,
5543 .ipa_enable_pipes = ol_txrx_ipa_enable_pipes,
5544 .ipa_disable_pipes = ol_txrx_ipa_disable_pipes,
5545 .ipa_set_perf_level = ol_txrx_ipa_set_perf_level,
5546#ifdef FEATURE_METERING
Yun Park637d6482016-10-05 10:51:33 -07005547 .ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
5548 .ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota
Yun Parkb4f591d2017-03-29 15:51:01 -07005549#endif
Leo Chang98726762016-10-28 11:07:18 -07005550};
Yun Parkb4f591d2017-03-29 15:51:01 -07005551#endif
Leo Chang98726762016-10-28 11:07:18 -07005552
Leo Chang98726762016-10-28 11:07:18 -07005553static struct cdp_bus_ops ol_ops_bus = {
5554 .bus_suspend = ol_txrx_bus_suspend,
5555 .bus_resume = ol_txrx_bus_resume
5556};
5557
5558static struct cdp_ocb_ops ol_ops_ocb = {
5559 .set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
5560 .get_ocb_chan_info = ol_txrx_get_ocb_chan_info
5561};
5562
5563static struct cdp_throttle_ops ol_ops_throttle = {
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005564#ifdef QCA_SUPPORT_TX_THROTTLE
Leo Chang98726762016-10-28 11:07:18 -07005565 .throttle_init_period = ol_tx_throttle_init_period,
5566 .throttle_set_level = ol_tx_throttle_set_level
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005567#endif /* QCA_SUPPORT_TX_THROTTLE */
Leo Chang98726762016-10-28 11:07:18 -07005568};
5569
5570static struct cdp_mob_stats_ops ol_ops_mob_stats = {
Leo Chang98726762016-10-28 11:07:18 -07005571 .clear_stats = ol_txrx_clear_stats,
5572 .stats = ol_txrx_stats
5573};
5574
5575static struct cdp_cfg_ops ol_ops_cfg = {
5576 .set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
5577 .set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
5578 .cfg_attach = ol_pdev_cfg_attach,
5579 .vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
5580 .is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
5581 .tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
5582 .is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
5583 .set_flow_control_parameters =
5584 ol_txrx_wrapper_set_flow_control_parameters,
5585 .set_flow_steering = ol_set_cfg_flow_steering,
5586};
5587
5588static struct cdp_peer_ops ol_ops_peer = {
5589 .register_peer = ol_txrx_wrapper_register_peer,
5590 .clear_peer = ol_txrx_clear_peer,
5591 .find_peer_by_addr = ol_txrx_wrapper_find_peer_by_addr,
5592 .find_peer_by_addr_and_vdev = ol_txrx_find_peer_by_addr_and_vdev,
5593 .local_peer_id = ol_txrx_local_peer_id,
5594 .peer_find_by_local_id = ol_txrx_wrapper_peer_find_by_local_id,
5595 .peer_state_update = ol_txrx_wrapper_peer_state_update,
5596 .get_vdevid = ol_txrx_get_vdevid,
5597 .get_vdev_by_sta_id = ol_txrx_get_vdev_by_sta_id,
5598 .register_ocb_peer = ol_txrx_register_ocb_peer,
5599 .peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
5600 .get_peer_state = ol_txrx_get_peer_state,
5601 .get_vdev_for_peer = ol_txrx_get_vdev_for_peer,
5602 .update_ibss_add_peer_num_of_vdev =
5603 ol_txrx_update_ibss_add_peer_num_of_vdev,
5604 .remove_peers_for_vdev = ol_txrx_remove_peers_for_vdev,
5605 .remove_peers_for_vdev_no_lock = ol_txrx_remove_peers_for_vdev_no_lock,
Yu Wang053d3e72017-02-08 18:48:24 +08005606#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
Leo Chang98726762016-10-28 11:07:18 -07005607 .copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
5608 .add_last_real_peer = ol_txrx_add_last_real_peer,
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005609 .is_vdev_restore_last_peer = is_vdev_restore_last_peer,
5610 .update_last_real_peer = ol_txrx_update_last_real_peer,
5611#endif /* CONFIG_HL_SUPPORT && FEATURE_WLAN_TDLS */
Leo Chang98726762016-10-28 11:07:18 -07005612 .last_assoc_received = ol_txrx_last_assoc_received,
5613 .last_disassoc_received = ol_txrx_last_disassoc_received,
5614 .last_deauth_received = ol_txrx_last_deauth_received,
Leo Chang98726762016-10-28 11:07:18 -07005615 .peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
5616};
5617
5618static struct cdp_tx_delay_ops ol_ops_delay = {
5619#ifdef QCA_COMPUTE_TX_DELAY
5620 .tx_delay = ol_tx_delay,
5621 .tx_delay_hist = ol_tx_delay_hist,
5622 .tx_packet_count = ol_tx_packet_count,
5623 .tx_set_compute_interval = ol_tx_set_compute_interval
5624#endif /* QCA_COMPUTE_TX_DELAY */
5625};
5626
5627static struct cdp_pmf_ops ol_ops_pmf = {
5628 .get_pn_info = ol_txrx_get_pn_info
5629};
5630
5631/* WINplatform specific structures */
5632static struct cdp_ctrl_ops ol_ops_ctrl = {
5633 /* EMPTY FOR MCL */
5634};
5635
5636static struct cdp_me_ops ol_ops_me = {
5637 /* EMPTY FOR MCL */
5638};
5639
5640static struct cdp_mon_ops ol_ops_mon = {
5641 /* EMPTY FOR MCL */
5642};
5643
5644static struct cdp_host_stats_ops ol_ops_host_stats = {
5645 /* EMPTY FOR MCL */
5646};
5647
5648static struct cdp_wds_ops ol_ops_wds = {
5649 /* EMPTY FOR MCL */
5650};
5651
5652static struct cdp_raw_ops ol_ops_raw = {
5653 /* EMPTY FOR MCL */
5654};
5655
5656static struct cdp_ops ol_txrx_ops = {
5657 .cmn_drv_ops = &ol_ops_cmn,
5658 .ctrl_ops = &ol_ops_ctrl,
5659 .me_ops = &ol_ops_me,
5660 .mon_ops = &ol_ops_mon,
5661 .host_stats_ops = &ol_ops_host_stats,
5662 .wds_ops = &ol_ops_wds,
5663 .raw_ops = &ol_ops_raw,
5664 .misc_ops = &ol_ops_misc,
5665 .cfg_ops = &ol_ops_cfg,
5666 .flowctl_ops = &ol_ops_flowctl,
5667 .l_flowctl_ops = &ol_ops_l_flowctl,
Yun Parkb4f591d2017-03-29 15:51:01 -07005668#ifdef IPA_OFFLOAD
Leo Chang98726762016-10-28 11:07:18 -07005669 .ipa_ops = &ol_ops_ipa,
Yun Parkb4f591d2017-03-29 15:51:01 -07005670#endif
Leo Chang98726762016-10-28 11:07:18 -07005671 .bus_ops = &ol_ops_bus,
5672 .ocb_ops = &ol_ops_ocb,
5673 .peer_ops = &ol_ops_peer,
5674 .throttle_ops = &ol_ops_throttle,
5675 .mob_stats_ops = &ol_ops_mob_stats,
5676 .delay_ops = &ol_ops_delay,
5677 .pmf_ops = &ol_ops_pmf
5678};
5679
Jeff Johnson02c37b42017-01-10 14:49:24 -08005680/*
5681 * Local prototype added to temporarily address warning caused by
5682 * -Wmissing-prototypes. A more correct solution, namely to expose
5683 * a prototype in an appropriate header file, will come later.
5684 */
5685struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
5686 struct ol_if_ops *dp_ol_if_ops);
5687struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
5688 struct ol_if_ops *dp_ol_if_ops)
Leo Chang98726762016-10-28 11:07:18 -07005689{
5690	struct cdp_soc_t *soc = qdf_mem_malloc(sizeof(struct cdp_soc_t));

5691 if (!soc) {
5692 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5693 "%s: OL SOC memory allocation failed\n", __func__);
5694 return NULL;
5695 }
5696
5697 soc->ops = &ol_txrx_ops;
5698 return soc;
5699}
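
/*
 * Illustrative sketch (not part of the driver): attaching the SOC and
 * dispatching through the cdp ops table it exposes. scn_handle is assumed to
 * be the caller-owned device context; dp_ol_if_ops is unused by the MCL
 * implementation above, so NULL is passed. Teardown would go through
 * txrx_soc_detach in the same ops table.
 *
 *	struct cdp_soc_t *soc = ol_txrx_soc_attach(scn_handle, NULL);
 *
 *	if (soc)
 *		soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
 */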
5700
5701