/*
 * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>              /* uint32_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc,free */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <qdf_atomic.h>         /* qdf_atomic_read */

#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
/* Required for WLAN_FEATURE_FASTPATH */
#include <ce_api.h>
#endif
/* header files for utilities */
#include <cds_queue.h>          /* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_if_athvar.h>

/* header files for HTT API */
#include <ol_htt_api.h>
#include <ol_htt_tx_api.h>

/* header files for our own APIs */
#include <ol_txrx_api.h>
#include <ol_txrx_dbg.h>
#include <cdp_txrx_ocb.h>
#include <ol_txrx_ctrl_api.h>
#include <cdp_txrx_stats.h>
#include <ol_txrx_osif_api.h>
/* header files for our internal definitions */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT, etc. */
#include <wdi_event.h>          /* WDI events */
#include <ol_tx.h>              /* ol_tx_ll */
#include <ol_rx.h>              /* ol_rx_deliver */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
#include <ol_rx_pn.h>           /* ol_rx_pn_check, etc. */
#include <ol_rx_fwd.h>          /* ol_rx_fwd_check, etc. */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
#include <ol_rx_reorder.h>
#include <ol_tx_send.h>         /* ol_tx_discard_target_frms */
#include <ol_tx_desc.h>         /* ol_tx_desc_frame_free */
#include <ol_tx_queue.h>
#include <ol_tx_sched.h>        /* ol_tx_sched_attach, etc. */
#include <ol_txrx.h>
#include <ol_txrx_types.h>
#include <cdp_txrx_flow_ctrl_legacy.h>
#include <cdp_txrx_bus.h>
#include <cdp_txrx_ipa.h>
#include <cdp_txrx_pmf.h>
#include "wma.h"
#include "hif.h"
#include <cdp_txrx_peer_ops.h>
#ifndef REMOVE_PKT_LOG
#include "pktlog_ac.h"
#endif
#include <wlan_policy_mgr_api.h>
#include "epping_main.h"
#include <a_types.h>
#include <cdp_txrx_handle.h>
#include <htt_internal.h>
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *pdev,
			      uint8_t local_peer_id);

#endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */
QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
				     uint8_t *peer_mac,
				     enum ol_txrx_peer_state state);
static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *vdev,
					bool val);
int ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
extern void
ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
		      struct ol_tx_wmm_param_t wmm_param);

extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
				uint64_t **last_pn, uint32_t **rmf_pn_replays);

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
 * @vdev: the data virtual device
 * @bss_addr: bss address
 *
 * Return: None
 */
static void
ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	if (bss_addr && vdev->last_real_peer &&
	    !qdf_mem_cmp((u8 *)bss_addr,
			 vdev->last_real_peer->mac_addr.raw,
			 IEEE80211_ADDR_LEN))
		qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
			     vdev->last_real_peer->mac_addr.raw,
			     OL_TXRX_MAC_ADDR_LEN);
}

/**
 * ol_txrx_add_last_real_peer() - add last peer
 * @pdev: the data physical device
 * @vdev: virtual device
 * @peer_id: peer id
 *
 * Return: None
 */
static void
ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
			   struct cdp_vdev *pvdev, uint8_t *peer_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	ol_txrx_peer_handle peer;

	if (vdev->last_real_peer == NULL) {
		peer = NULL;
		peer = ol_txrx_find_peer_by_addr(
			(struct cdp_pdev *)pdev,
			vdev->hl_tdls_ap_mac_addr.raw,
			peer_id);
		if (peer && (peer->peer_ids[0] !=
			HTT_INVALID_PEER_ID))
			vdev->last_real_peer = peer;
	}
}

/**
 * is_vdev_restore_last_peer() - check for vdev last peer
 * @peer: peer object
 *
 * Return: true if last peer is not null
 */
static bool
is_vdev_restore_last_peer(void *ppeer)
{
	struct ol_txrx_peer_t *peer = ppeer;
	struct ol_txrx_vdev_t *vdev;

	vdev = peer->vdev;
	return vdev->last_real_peer && (vdev->last_real_peer == peer);
}

/**
 * ol_txrx_update_last_real_peer() - check for vdev last peer
 * @pdev: the data physical device
 * @peer: peer device
 * @peer_id: peer id
 * @restore_last_peer: restore last peer flag
 *
 * Return: None
 */
static void
ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *ppeer,
			      uint8_t *peer_id, bool restore_last_peer)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_peer_t *peer = ppeer;
	struct ol_txrx_vdev_t *vdev;

	vdev = peer->vdev;
	if (restore_last_peer && (vdev->last_real_peer == NULL)) {
		peer = NULL;
		peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
				vdev->hl_tdls_ap_mac_addr.raw, peer_id);
		if (peer && (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
			vdev->last_real_peer = peer;
	}
}
#endif

/**
 * ol_tx_mark_first_wakeup_packet() - set flag to indicate that
 *    fw is compatible for marking first packet after wow wakeup
 * @value: 1 for enabled/ 0 for disabled
 *
 * Return: None
 */
static void ol_tx_mark_first_wakeup_packet(uint8_t value)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err(
			"%s: pdev is NULL\n", __func__);
		return;
	}

	htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
}

u_int16_t
ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
{
	u_int16_t desc_pool_size;
	u_int16_t steady_state_tx_lifetime_ms;
	u_int16_t safety_factor;

	/*
	 * Steady-state tx latency:
	 *     roughly 1-2 ms flight time
	 *   + roughly 1-2 ms prep time,
	 *   + roughly 1-2 ms target->host notification time.
	 * = roughly 6 ms total
	 * Thus, steady state number of frames =
	 * steady state max throughput / frame size * tx latency, e.g.
	 * 1 Gbps / 1500 bytes * 6 ms = 500
	 *
	 */
	steady_state_tx_lifetime_ms = 6;

	safety_factor = 8;

	desc_pool_size =
		ol_cfg_max_thruput_mbps(ctrl_pdev) *
		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
		(8 * OL_TX_AVG_FRM_BYTES) *
		steady_state_tx_lifetime_ms *
		safety_factor;

	/* minimum */
	if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;

	/* maximum */
	if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;

	return desc_pool_size;
}
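
/*
 * Illustrative sizing (editor's note, with assumed numbers): if
 * ol_cfg_max_thruput_mbps() returned 800 and OL_TX_AVG_FRM_BYTES were 1500,
 * the formula above would give 800 * 1000 / (8 * 1500) * 6 * 8
 * = 66 * 6 * 8 = 3168 descriptors, which is then clamped to the
 * [OL_TX_DESC_POOL_SIZE_MIN_HL, OL_TX_DESC_POOL_SIZE_MAX_HL] range.
 */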

/*=== function definitions ===*/

/**
 * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate that mgmt over
 *    wmi is enabled or not.
 * @value: 1 for enabled/ 0 for disabled
 *
 * Return: None
 */
void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->is_mgmt_over_wmi_enabled = value;
}

/**
 * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
 *
 * Return: is_mgmt_over_wmi_enabled
 */
uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return 0;
	}
	return pdev->is_mgmt_over_wmi_enabled;
}


#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
static void *
ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
		struct cdp_vdev *pvdev, uint8_t *peer_addr, uint8_t *peer_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	qdf_atomic_dec(&peer->ref_cnt);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: peer %p peer->ref_cnt %d", __func__, peer,
		  qdf_atomic_read(&peer->ref_cnt));
	return peer;
}

static QDF_STATUS ol_txrx_get_vdevid(void *ppeer, uint8_t *vdev_id)
{
	struct ol_txrx_peer_t *peer = ppeer;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "peer argument is null!!");
		return QDF_STATUS_E_FAILURE;
	}

	*vdev_id = peer->vdev->vdev_id;
	return QDF_STATUS_SUCCESS;
}

static struct cdp_vdev *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id)
{
	struct ol_txrx_peer_t *peer = NULL;
	ol_txrx_pdev_handle pdev = NULL;

	if (sta_id >= WLAN_MAX_STA_COUNT) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Invalid sta id passed");
		return NULL;
	}

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PDEV not found for sta_id [%d]", sta_id);
		return NULL;
	}

	peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PEER [%d] not found", sta_id);
		return NULL;
	}

	return (struct cdp_vdev *)peer->vdev;
}

void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
				uint8_t *peer_addr,
				uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	peer = ol_txrx_peer_find_hash_find(pdev, peer_addr, 0, 1);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	qdf_atomic_dec(&peer->ref_cnt);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: peer %p peer->ref_cnt %d", __func__, peer,
		  qdf_atomic_read(&peer->ref_cnt));
	return (void *)peer;
}

static uint16_t ol_txrx_local_peer_id(void *ppeer)
{
	ol_txrx_peer_handle peer = ppeer;

	return peer->local_id;
}

/**
 * @brief Find a txrx peer handle from a peer's local ID
 * @details
 *  The control SW typically uses the txrx peer handle to refer to the peer.
 *  In unusual circumstances, if it is infeasible for the control SW to
 *  maintain the txrx peer handle but it can maintain a small integer local
 *  peer ID, this function allows the peer handle to be retrieved, based on
 *  the local peer ID.
 *
 * @param pdev - the data physical device object
 * @param local_peer_id - the ID txrx assigned locally to the peer in question
 * @return handle to the txrx peer object
 */
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *ppdev,
			      uint8_t local_peer_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_peer_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	return peer;
}

static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
}
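
/*
 * Illustrative note (editor's sketch, with an assumed pool size): if
 * OL_TXRX_NUM_LOCAL_PEER_IDS were 4, pool_init above would leave
 *   pool[] = {1, 2, 3, 4, 4}   (the last entry links to itself as the
 *                               end-of-list marker)
 *   map[]  = {NULL, NULL, NULL, NULL}
 * with freelist == 0, so the allocator below hands out IDs 0, 1, 2, 3 in
 * order and reports OL_TXRX_INVALID_LOCAL_PEER_ID once the freelist reaches
 * the self-linked marker.
 */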

static void
ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}
	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
}

#else
#define ol_txrx_local_peer_id_pool_init(pdev)   /* no-op */
#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
#define ol_txrx_local_peer_id_free(pdev, peer)  /* no-op */
#define ol_txrx_local_peer_id_cleanup(pdev)     /* no-op */
#endif

#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

/**
 * ol_txrx_update_group_credit() - update group credit for tx queue
 * @group: for which credit needs to be updated
 * @credit: credits
 * @absolute: TXQ group absolute
 *
 * Return: allocated pool size
 */
void ol_txrx_update_group_credit(
		struct ol_tx_queue_group_t *group,
		int32_t credit,
		u_int8_t absolute)
{
	if (absolute)
		qdf_atomic_set(&group->credit, credit);
	else
		qdf_atomic_add(credit, &group->credit);
}

/**
 * ol_txrx_update_tx_queue_groups() - update vdev tx queue group if
 *				      vdev id mask and ac mask are not matching
 * @pdev: the data physical device
 * @group_id: TXQ group id
 * @credit: TXQ group credit count
 * @absolute: TXQ group absolute
 * @vdev_id_mask: TXQ vdev group id mask
 * @ac_mask: TXQ access category mask
 *
 * Return: None
 */
void ol_txrx_update_tx_queue_groups(
		ol_txrx_pdev_handle pdev,
		u_int8_t group_id,
		int32_t credit,
		u_int8_t absolute,
		u_int32_t vdev_id_mask,
		u_int32_t ac_mask
		)
{
	struct ol_tx_queue_group_t *group;
	u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
	u_int32_t membership;
	struct ol_txrx_vdev_t *vdev;

	group = &pdev->txq_grps[group_id];

	membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	/*
	 * if the membership (vdev id mask and ac mask)
	 * matches then no need to update tx queue groups.
	 */
	if (group->membership == membership)
		/* Update Credit Only */
		goto credit_update;


	/*
	 * membership (vdev id mask and ac mask) is not matching
	 * TODO: ignoring ac mask for now
	 */
	group_vdev_id_mask =
		OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		group_vdev_bit_mask =
			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
					group_vdev_id_mask, vdev->vdev_id);
		vdev_bit_mask =
			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
					vdev_id_mask, vdev->vdev_id);

		if (group_vdev_bit_mask != vdev_bit_mask) {
			/*
			 * Change in vdev tx queue group
			 */
			if (!vdev_bit_mask) {
				/* Set Group Pointer (vdev and peer) to NULL */
				ol_tx_set_vdev_group_ptr(
						pdev, vdev->vdev_id, NULL);
			} else {
				/* Set Group Pointer (vdev and peer) */
				ol_tx_set_vdev_group_ptr(
						pdev, vdev->vdev_id, group);
			}
		}
	}
	/* Update membership */
	group->membership = membership;
credit_update:
	/* Update Credit */
	ol_txrx_update_group_credit(group, credit, absolute);
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
}
#endif

#ifdef WLAN_FEATURE_FASTPATH
/**
 * setup_fastpath_ce_handles() - update pdev with ce_handle for fastpath use
 *
 * @osc: pointer to HIF context
 * @pdev: pointer to ol pdev
 *
 * Return: void
 */
static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
				struct ol_txrx_pdev_t *pdev)
{
	/*
	 * Before the HTT attach, set up the CE handles
	 * CE handles are (struct CE_state *)
	 * This is only required in the fast path
	 */
	pdev->ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_H2T_MSG);

}

#else  /* not WLAN_FEATURE_FASTPATH */
static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
				struct ol_txrx_pdev_t *pdev)
{
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_set_desc_global_pool_size() - set global pool size
 * @num_msdu_desc: total number of descriptors
 *
 * Return: none
 */
static void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->num_msdu_desc = num_msdu_desc;
	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
		pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
	ol_txrx_info_high("Global pool size: %d\n",
		pdev->num_msdu_desc);
}

/**
 * ol_tx_get_desc_global_pool_size() - get global pool size
 * @pdev: pdev handle
 *
 * Return: global pool size
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return pdev->num_msdu_desc;
}

/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
static inline
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
	struct ol_tx_flow_pool_t *pool = NULL;
	uint32_t free_desc;

	free_desc = pdev->tx_desc.num_free;
	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		free_desc += pool->avail_desc;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	}
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return free_desc;
}

#else
/**
 * ol_tx_get_desc_global_pool_size() - get global pool size
 * @pdev: pdev handle
 *
 * Return: global pool size
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
}

/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
static inline
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
	return pdev->tx_desc.num_free;
}

#endif
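
/*
 * Editor's note (summary of the two variants above, for readability): with
 * QCA_LL_TX_FLOW_CONTROL_V2 the global pool size is the configured
 * num_msdu_desc (plus TX_FLOW_MGMT_POOL_SIZE when management frames are not
 * sent over WMI), and free descriptors are counted across every flow pool;
 * otherwise the pool size follows the target's tx credit and only the
 * single pdev freelist is counted.
 */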

#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)

/**
 * ol_txrx_rsrc_threshold_lo() - set threshold low - when to start tx desc
 *				 margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold low
 */
static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;

	/*
	 * 5% margin of unallocated desc is too much for per
	 * vdev mechanism.
	 * Define the value separately.
	 */
	threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;

	return threshold_low;
}

/**
 * ol_txrx_rsrc_threshold_hi() - set threshold high - where to stop
 *				 during tx desc margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold high
 */
static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/* when freeing up descriptors,
	 * keep going until there's a 7.5% margin
	 */
	threshold_high = ((15 * desc_pool_size)/100)/2;

	return threshold_high;
}
#else

static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;
	/* always maintain a 5% margin of unallocated descriptors */
	threshold_low = (5 * desc_pool_size)/100;

	return threshold_low;
}

static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/* when freeing up descriptors, keep going until
	 * there's a 15% margin
	 */
	threshold_high = (15 * desc_pool_size)/100;

	return threshold_high;
}
#endif
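
/*
 * Illustrative numbers (editor's note, with an assumed pool size): for
 * desc_pool_size == 1000, the default (non-per-vdev) variants above give
 * threshold_lo = 5 * 1000 / 100 = 50 and threshold_hi = 15 * 1000 / 100
 * = 150, i.e. replenishment starts once fewer than 5% of the descriptors
 * are unallocated and stops again at a 15% margin.
 */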

#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
 * @pdev: the physical device object
 *
 * Return: None
 */
static void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->txq_log_spinlock);
	pdev->txq_log.size = OL_TXQ_LOG_SIZE;
	pdev->txq_log.oldest_record_offset = 0;
	pdev->txq_log.offset = 0;
	pdev->txq_log.allow_wrap = 1;
	pdev->txq_log.wrapped = 0;
}

/**
 * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->txq_log_spinlock);
}

#else

static inline void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
}

static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
}


#endif

#if defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->grp_stat_spinlock);
	pdev->grp_stats.last_valid_index = -1;
	pdev->grp_stats.wrap_around = 0;
}

/**
 * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
}
#else

static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
}

static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
}
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
 * @vdev: the virtual device object
 * @flag: flag
 *
 * Return: None
 */
void
ol_txrx_hl_tdls_flag_reset(struct cdp_vdev *pvdev, bool flag)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	vdev->hlTdlsFlag = flag;
}
#endif

#if defined(CONFIG_HL_SUPPORT)

/**
 * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
	u_int8_t i;

	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		TAILQ_INIT(&vdev->txqs[i].head);
		vdev->txqs[i].paused_count.total = 0;
		vdev->txqs[i].frms = 0;
		vdev->txqs[i].bytes = 0;
		vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
		vdev->txqs[i].flag = ol_tx_queue_empty;
		/* aggregation is not applicable for vdev tx queues */
		vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
		ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
		ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
	}
}

/**
 * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_frms_queue_t *txq;
	int i;

	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		txq = &vdev->txqs[i];
		ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
	}
}

/**
 * ol_txrx_peer_txqs_init() - initialise peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
static void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
		       struct ol_txrx_peer_t *peer)
{
	uint8_t i;
	struct ol_txrx_vdev_t *vdev = peer->vdev;

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		TAILQ_INIT(&peer->txqs[i].head);
		peer->txqs[i].paused_count.total = 0;
		peer->txqs[i].frms = 0;
		peer->txqs[i].bytes = 0;
		peer->txqs[i].ext_tid = i;
		peer->txqs[i].flag = ol_tx_queue_empty;
		peer->txqs[i].aggr_state = ol_tx_aggr_untried;
		ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
		ol_txrx_set_txq_peer(&peer->txqs[i], peer);
	}
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);

	/* aggregation is not applicable for mgmt and non-QoS tx queues */
	for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
		peer->txqs[i].aggr_state = ol_tx_aggr_disabled;

	ol_txrx_peer_pause(peer);
}

/**
 * ol_txrx_peer_tx_queue_free() - free peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
static void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	struct ol_tx_frms_queue_t *txq;
	uint8_t i;

	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		txq = &peer->txqs[i];
		ol_tx_queue_free(pdev, txq, i, true);
	}
}
#else

static inline void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
}

static inline void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
}

static inline void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
		       struct ol_txrx_peer_t *peer)
{
}

static inline void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
}
#endif

#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
{
	qdf_spinlock_create(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
{
	qdf_spinlock_destroy(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
	int msdu_idx;
	int seg_idx;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "TSO Statistics:");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "TSO pkts %lld, bytes %lld\n",
		  pdev->stats.pub.tx.tso.tso_pkts.pkts,
		  pdev->stats.pub.tx.tso.tso_pkts.bytes);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "TSO Histogram for numbers of segments:\n"
		  "Single segment %d\n"
		  "  2-5 segments %d\n"
		  " 6-10 segments %d\n"
		  "11-15 segments %d\n"
		  "16-20 segments %d\n"
		  "  20+ segments %d\n",
		  pdev->stats.pub.tx.tso.tso_hist.pkts_1,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
		  pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "TSO History Buffer: Total size %d, current_index %d",
		  NUM_MAX_TSO_MSDUS,
		  TXRX_STATS_TSO_MSDU_IDX(pdev));

	for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
		if (TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx) == 0)
			continue;
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
			  msdu_idx,
			  TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
			  TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
			  TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
			  TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));

		for (seg_idx = 0;
			 ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev,
			   msdu_idx)) && (seg_idx < NUM_MAX_TSO_SEGS));
			 seg_idx++) {
			struct qdf_tso_seg_t tso_seg =
				 TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "seg idx: %d", seg_idx);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "tso_enable: %d",
				  tso_seg.tso_flags.tso_enable);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
				  tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
				  tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
				  tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
				  tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
				  tso_seg.tso_flags.ns);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "tcp_seq_num: 0x%x ip_id: %d",
				  tso_seg.tso_flags.tcp_seq_num,
				  tso_seg.tso_flags.ip_id);
		}
	}
}
#else
static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "TSO is not supported\n");
}

static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
{
	/*
	 * keeping the body empty and not keeping an error print as the print
	 * will show up every time during driver load if TSO is not enabled.
	 */
}

static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
{
	/*
	 * keeping the body empty and not keeping an error print as the print
	 * will show up every time during driver unload if TSO is not enabled.
	 */
}

#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */

/**
 * ol_txrx_pdev_attach() - allocate txrx pdev
 * @ctrl_pdev: cfg pdev
 * @htc_pdev: HTC pdev
 * @osdev: os dev
 *
 * Return: txrx pdev handle
 *	   NULL for failure
 */
static struct cdp_pdev *
ol_txrx_pdev_attach(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
		    HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
{
	struct ol_txrx_pdev_t *pdev;
	int i, tid;

	pdev = qdf_mem_malloc(sizeof(*pdev));
	if (!pdev)
		goto fail0;

	/* init LL/HL cfg here */
	pdev->cfg.is_high_latency = ol_cfg_is_high_latency(ctrl_pdev);
	pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(ctrl_pdev);

	/* store provided params */
	pdev->ctrl_pdev = ctrl_pdev;
	pdev->osdev = osdev;

	for (i = 0; i < htt_num_sec_types; i++)
		pdev->sec_types[i] = (enum ol_sec_type)i;

	TXRX_STATS_INIT(pdev);
	ol_txrx_tso_stats_init(pdev);

	TAILQ_INIT(&pdev->vdev_list);

	/* do initial set up of the peer ID -> peer object lookup map */
	if (ol_txrx_peer_find_attach(pdev))
		goto fail1;

	/* initialize the counter of the target's tx buffer availability */
	qdf_atomic_init(&pdev->target_tx_credit);
	qdf_atomic_init(&pdev->orig_target_tx_credit);

	if (ol_cfg_is_high_latency(ctrl_pdev)) {
		qdf_spinlock_create(&pdev->tx_queue_spinlock);
		pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
		if (pdev->tx_sched.scheduler == NULL)
			goto fail2;
	}
	ol_txrx_pdev_txq_log_init(pdev);
	ol_txrx_pdev_grp_stats_init(pdev);

	pdev->htt_pdev =
		htt_pdev_alloc(pdev, ctrl_pdev, htc_pdev, osdev);
	if (!pdev->htt_pdev)
		goto fail3;

	htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
			ol_rx_pkt_dump_call);

	/*
	 * Init the tid --> category table.
	 * Regular tids (0-15) map to their AC.
	 * Extension tids get their own categories.
	 */
	for (tid = 0; tid < OL_TX_NUM_QOS_TIDS; tid++) {
		int ac = TXRX_TID_TO_WMM_AC(tid);

		pdev->tid_to_ac[tid] = ac;
	}
	pdev->tid_to_ac[OL_TX_NON_QOS_TID] =
		OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;
	pdev->tid_to_ac[OL_TX_MGMT_TID] =
		OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
	pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST] =
		OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
	pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
		OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;

	return (struct cdp_pdev *)pdev;

fail3:
	ol_txrx_peer_find_detach(pdev);

fail2:
	if (ol_cfg_is_high_latency(ctrl_pdev))
		qdf_spinlock_destroy(&pdev->tx_queue_spinlock);

fail1:
	ol_txrx_tso_stats_deinit(pdev);
	qdf_mem_free(pdev);

fail0:
	return NULL;
}
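
/*
 * Editor's note on the error labels above (derived from the code, for
 * readability): fail3 detaches the peer-find map, fail2 destroys the HL
 * tx-queue spinlock (high-latency configs only), fail1 de-inits the TSO
 * stats and frees the pdev, and fail0 simply returns NULL to the caller.
 */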

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
/**
 * htt_pkt_log_init() - API to initialize packet log
 * @handle: pdev handle
 * @scn: HIF context
 *
 * Return: void
 */
void htt_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
	struct ol_txrx_pdev_t *handle = (struct ol_txrx_pdev_t *)ppdev;

	if (handle->pkt_log_init)
		return;

	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
	    !QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
		ol_pl_sethandle(&handle->pl_dev, scn);
		if (pktlogmod_init(scn))
			qdf_print("%s: pktlogmod_init failed", __func__);
		else
			handle->pkt_log_init = true;
	}
}

/**
 * htt_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 *
 * Return: void
 */
static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
{
	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
	    !QDF_IS_EPPING_ENABLED(cds_get_conparam()) &&
	    handle->pkt_log_init) {
		pktlogmod_exit(handle);
		handle->pkt_log_init = false;
	}
}

#else
void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *ol_sc) { }
static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
#endif

/**
 * ol_txrx_pdev_post_attach() - attach txrx pdev
 * @pdev: txrx pdev
 *
 * Return: 0 for success
 */
int
ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	uint16_t i;
	uint16_t fail_idx = 0;
	int ret = 0;
	uint16_t desc_pool_size;
	struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);

	uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
	union ol_tx_desc_list_elem_t *c_element;
	unsigned int sig_bit;
	uint16_t desc_per_page;

	if (!osc) {
		ret = -EINVAL;
		goto ol_attach_fail;
	}

	/*
	 * For LL, limit the number of host's tx descriptors to match
	 * the number of target FW tx descriptors.
	 * This simplifies the FW, by ensuring the host will never
	 * download more tx descriptors than the target has space for.
	 * The FW will drop/free low-priority tx descriptors when it
	 * starts to run low, so that in theory the host should never
	 * run out of tx descriptors.
	 */

	/*
	 * LL - initialize the target credit ourselves.
	 * HL - wait for a HTT target credit initialization
	 * during htt_attach.
	 */
	if (pdev->cfg.is_high_latency) {
		desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);

		qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
		qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);

		pdev->tx_queue.rsrc_threshold_lo =
			ol_txrx_rsrc_threshold_lo(desc_pool_size);
		pdev->tx_queue.rsrc_threshold_hi =
			ol_txrx_rsrc_threshold_hi(desc_pool_size);

		for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++)
			qdf_atomic_init(&pdev->txq_grps[i].credit);

		ol_tx_target_credit_init(pdev, desc_pool_size);
	} else {
		qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
			       &pdev->target_tx_credit);
		desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
	}

	ol_tx_desc_dup_detect_init(pdev, desc_pool_size);

	setup_fastpath_ce_handles(osc, pdev);

	ret = htt_attach(pdev->htt_pdev, desc_pool_size);
	if (ret)
		goto htt_attach_fail;

	/* Attach micro controller data path offload resource */
	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
		ret = htt_ipa_uc_attach(pdev->htt_pdev);
		if (ret)
			goto uc_attach_fail;
	}

	/* Calculate single element reserved size power of 2 */
	pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
		pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
	if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
	    (NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Page alloc fail");
		ret = -ENOMEM;
		goto page_alloc_fail;
	}
	desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
	pdev->tx_desc.offset_filter = desc_per_page - 1;
	/* Calculate page divider to find page number */
	sig_bit = 0;
	while (desc_per_page) {
		sig_bit++;
		desc_per_page = desc_per_page >> 1;
	}
	pdev->tx_desc.page_divider = (sig_bit - 1);
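	/*
	 * Illustrative example (editor's note, with an assumed page size):
	 * if num_element_per_page were 32, the loop above would leave
	 * sig_bit == 6, so page_divider == 5 and offset_filter == 31;
	 * a descriptor id can then be split into a page (id >> page_divider)
	 * and an offset within that page (id & offset_filter).
	 */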
Srinivas Girigowdab8ecec22017-03-09 15:02:59 -08001340 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Leo Chang376398b2015-10-23 14:19:02 -07001341 "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
1342 pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
1343 desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
1344 pdev->tx_desc.desc_pages.num_element_per_page);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001345
1346 /*
1347 * Each SW tx desc (used only within the tx datapath SW) has a
1348 * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
1349 * Go ahead and allocate the HTT tx desc and link it with the SW tx
1350 * desc now, to avoid doing it during time-critical transmit.
1351 */
1352 pdev->tx_desc.pool_size = desc_pool_size;
Leo Chang376398b2015-10-23 14:19:02 -07001353 pdev->tx_desc.freelist =
1354 (union ol_tx_desc_list_elem_t *)
1355 (*pdev->tx_desc.desc_pages.cacheable_pages);
1356 c_element = pdev->tx_desc.freelist;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001357 for (i = 0; i < desc_pool_size; i++) {
1358 void *htt_tx_desc;
Leo Chang376398b2015-10-23 14:19:02 -07001359 void *htt_frag_desc = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301360 qdf_dma_addr_t frag_paddr = 0;
1361 qdf_dma_addr_t paddr;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001362
Leo Chang376398b2015-10-23 14:19:02 -07001363 if (i == (desc_pool_size - 1))
1364 c_element->next = NULL;
1365 else
1366 c_element->next = (union ol_tx_desc_list_elem_t *)
1367 ol_tx_desc_find(pdev, i + 1);
1368
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001369 htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001370 if (!htt_tx_desc) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301371 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001372 "%s: failed to alloc HTT tx desc (%d of %d)",
1373 __func__, i, desc_pool_size);
Leo Chang376398b2015-10-23 14:19:02 -07001374 fail_idx = i;
Yun Parkf01f6e22017-01-18 17:27:02 -08001375 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001376 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001377 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001378
Leo Chang376398b2015-10-23 14:19:02 -07001379 c_element->tx_desc.htt_tx_desc = htt_tx_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001380 c_element->tx_desc.htt_tx_desc_paddr = paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001381 ret = htt_tx_frag_alloc(pdev->htt_pdev,
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001382 i, &frag_paddr, &htt_frag_desc);
Leo Chang376398b2015-10-23 14:19:02 -07001383 if (ret) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301384 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001385				  "%s: failed to alloc HTT frag desc (%d/%d)",
1386 __func__, i, desc_pool_size);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001387			/* include index i in the cleanup: the loop frees [0, fail_idx) */
Leo Chang376398b2015-10-23 14:19:02 -07001388			fail_idx = i + 1;
 1389			goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001390 }
Leo Chang376398b2015-10-23 14:19:02 -07001391 if (!ret && htt_frag_desc) {
Yun Parkeaea8632017-04-09 09:53:45 -07001392 /*
1393 * Initialize the first 6 words (TSO flags)
1394 * of the frag descriptor
1395 */
Leo Chang376398b2015-10-23 14:19:02 -07001396 memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
1397 c_element->tx_desc.htt_frag_desc = htt_frag_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001398 c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001399 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001400#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
Leo Chang376398b2015-10-23 14:19:02 -07001401 c_element->tx_desc.pkt_type = 0xff;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001402#ifdef QCA_COMPUTE_TX_DELAY
Leo Chang376398b2015-10-23 14:19:02 -07001403 c_element->tx_desc.entry_timestamp_ticks =
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001404 0xffffffff;
1405#endif
1406#endif
Leo Chang376398b2015-10-23 14:19:02 -07001407 c_element->tx_desc.id = i;
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301408 qdf_atomic_init(&c_element->tx_desc.ref_cnt);
Leo Chang376398b2015-10-23 14:19:02 -07001409 c_element = c_element->next;
1410 fail_idx = i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001411 }
1412
1413 /* link SW tx descs into a freelist */
1414 pdev->tx_desc.num_free = desc_pool_size;
Poddar, Siddarth14521792017-03-14 21:19:42 +05301415 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001416 "%s first tx_desc:0x%p Last tx desc:0x%p\n", __func__,
1417 (uint32_t *) pdev->tx_desc.freelist,
1418 (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001419
1420 /* check what format of frames are expected to be delivered by the OS */
1421 pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
1422 if (pdev->frame_format == wlan_frm_fmt_native_wifi)
1423 pdev->htt_pkt_type = htt_pkt_type_native_wifi;
1424 else if (pdev->frame_format == wlan_frm_fmt_802_3) {
1425 if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
1426 pdev->htt_pkt_type = htt_pkt_type_eth2;
1427 else
1428 pdev->htt_pkt_type = htt_pkt_type_ethernet;
1429 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301430 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001431 "%s Invalid standard frame type: %d",
1432 __func__, pdev->frame_format);
Yun Parkf01f6e22017-01-18 17:27:02 -08001433 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001434 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001435 }
1436
1437 /* setup the global rx defrag waitlist */
1438 TAILQ_INIT(&pdev->rx.defrag.waitlist);
1439
1440 /* configure where defrag timeout and duplicate detection is handled */
1441 pdev->rx.flags.defrag_timeout_check =
1442 pdev->rx.flags.dup_check =
1443 ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);
1444
1445#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
 1446	/* Need to revisit this part. Currently, hardcoded to Riva's caps */
1447 pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
1448 pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
1449 /*
1450 * The Riva HW de-aggregate doesn't have capability to generate 802.11
1451 * header for non-first subframe of A-MSDU.
1452 */
1453 pdev->sw_subfrm_hdr_recovery_enable = 1;
1454 /*
1455 * The Riva HW doesn't have the capability to set Protected Frame bit
1456 * in the MAC header for encrypted data frame.
1457 */
1458 pdev->sw_pf_proc_enable = 1;
1459
1460 if (pdev->frame_format == wlan_frm_fmt_802_3) {
Yun Parkeaea8632017-04-09 09:53:45 -07001461 /*
1462 * sw llc process is only needed in
1463 * 802.3 to 802.11 transform case
1464 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001465 pdev->sw_tx_llc_proc_enable = 1;
1466 pdev->sw_rx_llc_proc_enable = 1;
1467 } else {
1468 pdev->sw_tx_llc_proc_enable = 0;
1469 pdev->sw_rx_llc_proc_enable = 0;
1470 }
1471
1472 switch (pdev->frame_format) {
1473 case wlan_frm_fmt_raw:
1474 pdev->sw_tx_encap =
1475 pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
1476 ? 0 : 1;
1477 pdev->sw_rx_decap =
1478 pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
1479 ? 0 : 1;
1480 break;
1481 case wlan_frm_fmt_native_wifi:
1482 pdev->sw_tx_encap =
1483 pdev->
1484 target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
1485 ? 0 : 1;
1486 pdev->sw_rx_decap =
1487 pdev->
1488 target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
1489 ? 0 : 1;
1490 break;
1491 case wlan_frm_fmt_802_3:
1492 pdev->sw_tx_encap =
1493 pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
1494 ? 0 : 1;
1495 pdev->sw_rx_decap =
1496 pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
1497 ? 0 : 1;
1498 break;
1499 default:
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301500 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001501 "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
1502 pdev->frame_format,
1503 pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
Yun Parkf01f6e22017-01-18 17:27:02 -08001504 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001505 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001506 }
1507#endif
1508
1509 /*
1510 * Determine what rx processing steps are done within the host.
1511 * Possibilities:
1512 * 1. Nothing - rx->tx forwarding and rx PN entirely within target.
1513 * (This is unlikely; even if the target is doing rx->tx forwarding,
1514 * the host should be doing rx->tx forwarding too, as a back up for
1515 * the target's rx->tx forwarding, in case the target runs short on
1516 * memory, and can't store rx->tx frames that are waiting for
1517 * missing prior rx frames to arrive.)
1518 * 2. Just rx -> tx forwarding.
1519 * This is the typical configuration for HL, and a likely
1520 * configuration for LL STA or small APs (e.g. retail APs).
1521 * 3. Both PN check and rx -> tx forwarding.
1522 * This is the typical configuration for large LL APs.
1523 * Host-side PN check without rx->tx forwarding is not a valid
1524 * configuration, since the PN check needs to be done prior to
1525 * the rx->tx forwarding.
1526 */
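	/*
	 * Reading aid, derived from the selection logic just below:
	 *
	 *   full reorder offload, rx->tx fwd disabled -> ol_rx_in_order_deliver
	 *   full reorder offload, rx->tx fwd enabled  -> ol_rx_fwd_check
	 *   host PN check, rx->tx fwd disabled        -> ol_rx_pn_check_only
	 *   host PN check, host rx->tx fwd check      -> ol_rx_pn_check
	 *   target PN check, host rx->tx fwd check    -> ol_rx_fwd_check
	 *   target PN check, no host fwd check        -> ol_rx_deliver
	 */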
1527 if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
Yun Parkeaea8632017-04-09 09:53:45 -07001528 /*
1529 * PN check, rx-tx forwarding and rx reorder is done by
1530 * the target
1531 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001532 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
1533 pdev->rx_opt_proc = ol_rx_in_order_deliver;
1534 else
1535 pdev->rx_opt_proc = ol_rx_fwd_check;
1536 } else {
1537 if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
1538 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
1539 /*
1540 * PN check done on host,
1541 * rx->tx forwarding not done at all.
1542 */
1543 pdev->rx_opt_proc = ol_rx_pn_check_only;
1544 } else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1545 /*
1546 * Both PN check and rx->tx forwarding done
1547 * on host.
1548 */
1549 pdev->rx_opt_proc = ol_rx_pn_check;
1550 } else {
 1551#define TRACESTR01 "invalid config: if rx PN check is on the host, "\
 1552"rx->tx forwarding check needs to also be on the host"
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301553 QDF_TRACE(QDF_MODULE_ID_TXRX,
1554 QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001555 "%s: %s", __func__, TRACESTR01);
1556#undef TRACESTR01
Yun Parkf01f6e22017-01-18 17:27:02 -08001557 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001558 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001559 }
1560 } else {
1561 /* PN check done on target */
1562 if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
1563 ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1564 /*
1565 * rx->tx forwarding done on host (possibly as
1566 * back-up for target-side primary rx->tx
1567 * forwarding)
1568 */
1569 pdev->rx_opt_proc = ol_rx_fwd_check;
1570 } else {
Yun Parkeaea8632017-04-09 09:53:45 -07001571 /*
1572 * rx->tx forwarding either done in target,
1573 * or not done at all
1574 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001575 pdev->rx_opt_proc = ol_rx_deliver;
1576 }
1577 }
1578 }
1579
1580 /* initialize mutexes for tx desc alloc and peer lookup */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301581 qdf_spinlock_create(&pdev->tx_mutex);
1582 qdf_spinlock_create(&pdev->peer_ref_mutex);
1583 qdf_spinlock_create(&pdev->rx.mutex);
1584 qdf_spinlock_create(&pdev->last_real_peer_mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001585 qdf_spinlock_create(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001586 OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
1587
Yun Parkf01f6e22017-01-18 17:27:02 -08001588 if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK) {
1589 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001590 goto reorder_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001591 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001592
Yun Parkf01f6e22017-01-18 17:27:02 -08001593 if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK) {
1594 ret = -ENOMEM;
Leo Chang376398b2015-10-23 14:19:02 -07001595 goto pn_trace_attach_fail;
Yun Parkf01f6e22017-01-18 17:27:02 -08001596 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001597
1598#ifdef PERE_IP_HDR_ALIGNMENT_WAR
1599 pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
1600#endif
1601
1602 /*
1603 * WDI event attach
1604 */
1605 wdi_event_attach(pdev);
1606
1607 /*
1608 * Initialize rx PN check characteristics for different security types.
1609 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301610 qdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001611
1612 /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
1613 pdev->rx_pn[htt_sec_type_tkip].len =
1614 pdev->rx_pn[htt_sec_type_tkip_nomic].len =
1615 pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
1616 pdev->rx_pn[htt_sec_type_tkip].cmp =
1617 pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
1618 pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
1619
1620 /* WAPI: 128-bit PN */
1621 pdev->rx_pn[htt_sec_type_wapi].len = 128;
1622 pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
1623
1624 OL_RX_REORDER_TIMEOUT_INIT(pdev);
1625
Poddar, Siddarth14521792017-03-14 21:19:42 +05301626 ol_txrx_dbg("Created pdev %p\n", pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001627
1628 pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
1629
1630#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
1631#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
1632
1633/* #if 1 -- TODO: clean this up */
1634#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
1635 /* avg = 100% * new + 0% * old */ \
1636 (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
1637/*
Yun Parkeaea8632017-04-09 09:53:45 -07001638 * #else
1639 * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
1640 * //avg = 25% * new + 25% * old
1641 * (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
1642 * #endif
1643 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001644 pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
1645 pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
1646#endif
1647
1648 ol_txrx_local_peer_id_pool_init(pdev);
1649
1650 pdev->cfg.ll_pause_txq_limit =
1651 ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
1652
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301653	/* TX flow control for peers that are in very bad link status */
1654 ol_tx_badpeer_flow_cl_init(pdev);
1655
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001656#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301657 qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301658 qdf_spinlock_create(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001659
1660 /* initialize compute interval with 5 seconds (ESE default) */
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301661 pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001662 {
1663 uint32_t bin_width_1000ticks;
Yun Parkeaea8632017-04-09 09:53:45 -07001664
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001665 bin_width_1000ticks =
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301666 qdf_system_msecs_to_ticks
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001667 (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
1668 * 1000);
1669 /*
1670 * Compute a factor and shift that together are equal to the
1671 * inverse of the bin_width time, so that rather than dividing
1672 * by the bin width time, approximately the same result can be
1673 * obtained much more efficiently by a multiply + shift.
1674 * multiply_factor >> shift = 1 / bin_width_time, so
1675 * multiply_factor = (1 << shift) / bin_width_time.
1676 *
1677 * Pick the shift semi-arbitrarily.
1678 * If we knew statically what the bin_width would be, we could
1679 * choose a shift that minimizes the error.
1680 * Since the bin_width is determined dynamically, simply use a
1681 * shift that is about half of the uint32_t size. This should
1682 * result in a relatively large multiplier value, which
1683 * minimizes error from rounding the multiplier to an integer.
1684 * The rounding error only becomes significant if the tick units
1685 * are on the order of 1 microsecond. In most systems, it is
1686 * expected that the tick units will be relatively low-res,
1687 * on the order of 1 millisecond. In such systems the rounding
1688 * error is negligible.
1689 * It would be more accurate to dynamically try out different
1690 * shifts and choose the one that results in the smallest
1691 * rounding error, but that extra level of fidelity is
1692 * not needed.
1693 */
1694 pdev->tx_delay.hist_internal_bin_width_shift = 16;
1695 pdev->tx_delay.hist_internal_bin_width_mult =
1696 ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
1697 1000 + (bin_width_1000ticks >> 1)) /
1698 bin_width_1000ticks;
1699 }
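	/*
	 * Worked example (illustrative only; the 1 ms tick and 10 ms bin
	 * width are assumed values, not necessarily the configured ones):
	 * bin_width_1000ticks = 10 * 1000 = 10000 ticks, so
	 * mult = ((1 << 16) * 1000 + 5000) / 10000 = 6554.
	 * A 25-tick (25 ms) delay then maps to bin (25 * 6554) >> 16 = 2,
	 * matching the exact result 25 / 10 = 2 (truncated).
	 */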
1700#endif /* QCA_COMPUTE_TX_DELAY */
1701
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001702 /* Thermal Mitigation */
1703 ol_tx_throttle_init(pdev);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001704
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001705 ol_tso_seg_list_init(pdev, desc_pool_size);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001706
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301707 ol_tso_num_seg_list_init(pdev, desc_pool_size);
1708
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001709 ol_tx_register_flow_control(pdev);
1710
1711 return 0; /* success */
1712
Leo Chang376398b2015-10-23 14:19:02 -07001713pn_trace_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001714 OL_RX_REORDER_TRACE_DETACH(pdev);
1715
Leo Chang376398b2015-10-23 14:19:02 -07001716reorder_trace_attach_fail:
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301717 qdf_spinlock_destroy(&pdev->tx_mutex);
1718 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1719 qdf_spinlock_destroy(&pdev->rx.mutex);
1720 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301721 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001722 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1723
Leo Chang376398b2015-10-23 14:19:02 -07001724control_init_fail:
1725desc_alloc_fail:
1726 for (i = 0; i < fail_idx; i++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001727 htt_tx_desc_free(pdev->htt_pdev,
Leo Chang376398b2015-10-23 14:19:02 -07001728 (ol_tx_desc_find(pdev, i))->htt_tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001729
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301730 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001731 &pdev->tx_desc.desc_pages, 0, true);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001732
Leo Chang376398b2015-10-23 14:19:02 -07001733page_alloc_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001734 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1735 htt_ipa_uc_detach(pdev->htt_pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001736uc_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001737 htt_detach(pdev->htt_pdev);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301738htt_attach_fail:
1739 ol_tx_desc_dup_detect_deinit(pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001740ol_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001741 return ret; /* fail */
1742}
1743
Dhanashri Atre12a08392016-02-17 13:10:34 -08001744/**
1745 * ol_txrx_pdev_attach_target() - send target configuration
1746 *
 1747 * @ppdev - the physical device being initialized
1748 *
1749 * The majority of the data SW setup are done by the pdev_attach
1750 * functions, but this function completes the data SW setup by
1751 * sending datapath configuration messages to the target.
1752 *
1753 * Return: 0 - success 1 - failure
1754 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001755static A_STATUS ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001756{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001757 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Rakesh Pillai7fb7a1f2017-06-23 14:46:36 +05301758	return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0 : 1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001759}
1760
Dhanashri Atre12a08392016-02-17 13:10:34 -08001761/**
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301762 * ol_txrx_pdev_pre_detach() - detach the data SW state
Dhanashri Atre12a08392016-02-17 13:10:34 -08001763 * @ppdev - the data physical device object being removed
1764 * @force - delete the pdev (and its vdevs and peers) even if
1765 * there are outstanding references by the target to the vdevs
1766 * and peers within the pdev
1767 *
1768 * This function is used when the WLAN driver is being removed to
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301769 * detach the host data component within the driver.
Dhanashri Atre12a08392016-02-17 13:10:34 -08001770 *
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301771 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08001772 */
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301773static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001774{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001775 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001776 int i;
Mohit Khannac3b069b2017-02-17 14:51:51 -08001777 int num_freed_tx_desc = 0;
Leo Chang376398b2015-10-23 14:19:02 -07001778
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001779 /* preconditions */
1780 TXRX_ASSERT2(pdev);
1781
1782 /* check that the pdev has no vdevs allocated */
1783 TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
1784
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001785#ifdef QCA_SUPPORT_TX_THROTTLE
1786 /* Thermal Mitigation */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301787 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
1788 qdf_timer_free(&pdev->tx_throttle.phase_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001789#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301790 qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1791 qdf_timer_free(&pdev->tx_throttle.tx_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001792#endif
1793#endif
Leo Chang376398b2015-10-23 14:19:02 -07001794 ol_tso_seg_list_deinit(pdev);
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301795 ol_tso_num_seg_list_deinit(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001796
1797 if (force) {
1798 /*
1799 * The assertion above confirms that all vdevs within this pdev
1800 * were detached. However, they may not have actually been
1801 * deleted.
1802 * If the vdev had peers which never received a PEER_UNMAP msg
1803 * from the target, then there are still zombie peer objects,
1804 * and the vdev parents of the zombie peers are also zombies,
1805 * hanging around until their final peer gets deleted.
1806 * Go through the peer hash table and delete any peers left.
1807 * As a side effect, this will complete the deletion of any
1808 * vdevs that are waiting for their peers to finish deletion.
1809 */
Poddar, Siddarth14521792017-03-14 21:19:42 +05301810 ol_txrx_dbg("Force delete for pdev %p\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001811 pdev);
1812 ol_txrx_peer_find_hash_erase(pdev);
1813 }
1814
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301815 /* to get flow pool status before freeing descs */
1816 ol_tx_dump_flow_pool_info();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001817
1818 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1819 void *htt_tx_desc;
Leo Chang376398b2015-10-23 14:19:02 -07001820 struct ol_tx_desc_t *tx_desc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001821
Leo Chang376398b2015-10-23 14:19:02 -07001822 tx_desc = ol_tx_desc_find(pdev, i);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001823 /*
1824 * Confirm that each tx descriptor is "empty", i.e. it has
1825 * no tx frame attached.
1826 * In particular, check that there are no frames that have
1827 * been given to the target to transmit, for which the
1828 * target has never provided a response.
1829 */
Himanshu Agarwalbda5f282017-04-19 18:11:27 +05301830 if (qdf_atomic_read(&tx_desc->ref_cnt) &&
1831 tx_desc->vdev_id != OL_TXRX_INVALID_VDEV_ID) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05301832 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001833				   "Warning: freeing tx frame (no completion)\n");
1834 ol_tx_desc_frame_free_nonstd(pdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001835 tx_desc, 1);
Mohit Khannac3b069b2017-02-17 14:51:51 -08001836 num_freed_tx_desc++;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001837 }
Leo Chang376398b2015-10-23 14:19:02 -07001838 htt_tx_desc = tx_desc->htt_tx_desc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001839 htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
1840 }
1841
Mohit Khannac3b069b2017-02-17 14:51:51 -08001842 if (num_freed_tx_desc)
1843 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1844 "freed %d tx frames for which no resp from target",
1845 num_freed_tx_desc);
1846
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301847 ol_tx_deregister_flow_control(pdev);
 1848	/* Stop the communication between HTT and target first */
1849 htt_detach_target(pdev->htt_pdev);
1850
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301851 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001852 &pdev->tx_desc.desc_pages, 0, true);
1853 pdev->tx_desc.freelist = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001854
1855 /* Detach micro controller data path offload resource */
1856 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1857 htt_ipa_uc_detach(pdev->htt_pdev);
1858
1859 htt_detach(pdev->htt_pdev);
Nirav Shah76291962016-04-25 10:50:37 +05301860 ol_tx_desc_dup_detect_deinit(pdev);
1861
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301862 qdf_spinlock_destroy(&pdev->tx_mutex);
1863 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1864 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
1865 qdf_spinlock_destroy(&pdev->rx.mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001866 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001867#ifdef QCA_SUPPORT_TX_THROTTLE
1868 /* Thermal Mitigation */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301869 qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001870#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301871
 1872	/* TX flow control for peers that are in very bad link status */
1873 ol_tx_badpeer_flow_cl_deinit(pdev);
1874
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001875 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1876
1877 OL_RX_REORDER_TRACE_DETACH(pdev);
1878 OL_RX_PN_TRACE_DETACH(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301879
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001880 /*
1881 * WDI event detach
1882 */
1883 wdi_event_detach(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301884
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001885 ol_txrx_local_peer_id_cleanup(pdev);
1886
1887#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301888 qdf_spinlock_destroy(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001889#endif
Houston Hoffmane5ec0492017-01-30 12:28:32 -08001890 qdf_mem_free(ppdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001891}
1892
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301893/**
1894 * ol_txrx_pdev_detach() - delete the data SW state
1895 * @ppdev - the data physical device object being removed
1896 * @force - delete the pdev (and its vdevs and peers) even if
1897 * there are outstanding references by the target to the vdevs
1898 * and peers within the pdev
1899 *
1900 * This function is used when the WLAN driver is being removed to
1901 * remove the host data component within the driver.
1902 * All virtual devices within the physical device need to be deleted
1903 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
1904 *
1905 * Return: None
1906 */
1907static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
1908{
1909 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
1910
 1911	/* check that the txrx pdev structure is not NULL */
1912 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05301913 ol_txrx_err(
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301914 "NULL pdev passed to %s\n", __func__);
1915 return;
1916 }
1917
1918 htt_pktlogmod_exit(pdev);
1919
1920 OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
1921
1922 if (pdev->cfg.is_high_latency)
1923 ol_tx_sched_detach(pdev);
1924
1925 htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
1926
1927 htt_pdev_free(pdev->htt_pdev);
1928 ol_txrx_peer_find_detach(pdev);
1929 ol_txrx_tso_stats_deinit(pdev);
1930
1931 ol_txrx_pdev_txq_log_destroy(pdev);
1932 ol_txrx_pdev_grp_stat_destroy(pdev);
1933}
1934
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301935#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
1936
1937/**
1938 * ol_txrx_vdev_tx_desc_cnt_init() - initialise tx descriptor count for vdev
1939 * @vdev: the virtual device object
1940 *
1941 * Return: None
1942 */
1943static inline void
1944ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
1945{
1946 qdf_atomic_init(&vdev->tx_desc_count);
1947}
1948#else
1949
1950static inline void
1951ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
1952{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301953}
1954#endif
1955
Dhanashri Atre12a08392016-02-17 13:10:34 -08001956/**
1957 * ol_txrx_vdev_attach - Allocate and initialize the data object
1958 * for a new virtual device.
1959 *
1960 * @data_pdev - the physical device the virtual device belongs to
1961 * @vdev_mac_addr - the MAC address of the virtual device
1962 * @vdev_id - the ID used to identify the virtual device to the target
1963 * @op_mode - whether this virtual device is operating as an AP,
1964 * an IBSS, or a STA
1965 *
1966 * Return: success: handle to new data vdev object, failure: NULL
1967 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001968static struct cdp_vdev *
1969ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001970 uint8_t *vdev_mac_addr,
1971 uint8_t vdev_id, enum wlan_op_mode op_mode)
1972{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001973 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001974 struct ol_txrx_vdev_t *vdev;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001975 QDF_STATUS qdf_status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001976
1977 /* preconditions */
1978 TXRX_ASSERT2(pdev);
1979 TXRX_ASSERT2(vdev_mac_addr);
1980
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301981 vdev = qdf_mem_malloc(sizeof(*vdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001982 if (!vdev)
1983 return NULL; /* failure */
1984
1985 /* store provided params */
1986 vdev->pdev = pdev;
1987 vdev->vdev_id = vdev_id;
1988 vdev->opmode = op_mode;
1989
1990 vdev->delete.pending = 0;
1991 vdev->safemode = 0;
1992 vdev->drop_unenc = 1;
1993 vdev->num_filters = 0;
Himanshu Agarwal5ac2f7b2016-05-06 20:08:10 +05301994 vdev->fwd_tx_packets = 0;
1995 vdev->fwd_rx_packets = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001996
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301997 ol_txrx_vdev_tx_desc_cnt_init(vdev);
1998
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301999 qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002000 OL_TXRX_MAC_ADDR_LEN);
2001
2002 TAILQ_INIT(&vdev->peer_list);
2003 vdev->last_real_peer = NULL;
2004
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002005 ol_txrx_hl_tdls_flag_reset((struct cdp_vdev *)vdev, false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302006
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002007#ifdef QCA_IBSS_SUPPORT
2008 vdev->ibss_peer_num = 0;
2009 vdev->ibss_peer_heart_beat_timer = 0;
2010#endif
2011
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302012 ol_txrx_vdev_txqs_init(vdev);
2013
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302014 qdf_spinlock_create(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002015 vdev->ll_pause.paused_reason = 0;
2016 vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
2017 vdev->ll_pause.txq.depth = 0;
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302018 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002019 &vdev->ll_pause.timer,
2020 ol_tx_vdev_ll_pause_queue_send, vdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302021 QDF_TIMER_TYPE_SW);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302022 qdf_atomic_init(&vdev->os_q_paused);
2023 qdf_atomic_set(&vdev->os_q_paused, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002024 vdev->tx_fl_lwm = 0;
2025 vdev->tx_fl_hwm = 0;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002026 vdev->rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002027 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Abhishek Singh217d9782017-04-28 23:49:11 +05302028 qdf_mem_zero(&vdev->last_peer_mac_addr,
2029 sizeof(union ol_txrx_align_mac_addr_t));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302030 qdf_spinlock_create(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002031 vdev->osif_flow_control_cb = NULL;
2032 vdev->osif_fc_ctx = NULL;
2033
2034 /* Default MAX Q depth for every VDEV */
2035 vdev->ll_pause.max_q_depth =
2036 ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002037 qdf_status = qdf_event_create(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002038 /* add this vdev into the pdev's list */
2039 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
2040
Poddar, Siddarth14521792017-03-14 21:19:42 +05302041 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002042 "Created vdev %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
2043 vdev,
2044 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2045 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2046 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2047
2048 /*
2049 * We've verified that htt_op_mode == wlan_op_mode,
2050 * so no translation is needed.
2051 */
2052 htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
2053
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002054 return (struct cdp_vdev *)vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002055}
2056
Dhanashri Atre12a08392016-02-17 13:10:34 -08002057/**
 2058 * ol_txrx_vdev_register - Link a vdev's data object with the
2059 * matching OS shim vdev object.
2060 *
2061 * @txrx_vdev: the virtual device's data object
2062 * @osif_vdev: the virtual device's OS shim object
2063 * @txrx_ops: (pointers to)functions used for tx and rx data xfer
2064 *
2065 * The data object for a virtual device is created by the
2066 * function ol_txrx_vdev_attach. However, rather than fully
2067 * linking the data vdev object with the vdev objects from the
2068 * other subsystems that the data vdev object interacts with,
2069 * the txrx_vdev_attach function focuses primarily on creating
2070 * the data vdev object. After the creation of both the data
2071 * vdev object and the OS shim vdev object, this
2072 * txrx_osif_vdev_attach function is used to connect the two
2073 * vdev objects, so the data SW can use the OS shim vdev handle
2074 * when passing rx data received by a vdev up to the OS shim.
2075 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002076static void ol_txrx_vdev_register(struct cdp_vdev *pvdev,
2077 void *osif_vdev,
2078 struct ol_txrx_ops *txrx_ops)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002079{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002080 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002081 if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
2082 qdf_print("%s: vdev/txrx_ops is NULL!\n", __func__);
2083 qdf_assert(0);
2084 return;
2085 }
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002086
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002087 vdev->osif_dev = osif_vdev;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002088 vdev->rx = txrx_ops->rx.rx;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002089 txrx_ops->tx.tx = ol_tx_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002090}
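/*
 * Minimal usage sketch (illustrative only): how an OS shim might create a
 * data vdev and then hook up its rx handler.  The shim-side names
 * (shim_ctx, shim_rx_deliver) are hypothetical and not part of this file,
 * and in the driver proper these entry points are normally reached through
 * the cdp ops table rather than called directly.
 *
 *	struct ol_txrx_ops ops = { 0 };
 *	struct cdp_vdev *vdev;
 *
 *	vdev = ol_txrx_vdev_attach(ppdev, mac_addr, vdev_id,
 *				   wlan_op_mode_sta);
 *	ops.rx.rx = shim_rx_deliver;
 *	ol_txrx_vdev_register(vdev, shim_ctx, &ops);
 *
 * After registration, ops.tx.tx has been set to ol_tx_data, which the shim
 * then uses to hand tx frames to the data path.
 */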
2091
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002092#ifdef currently_unused
Dhanashri Atre12a08392016-02-17 13:10:34 -08002093/**
2094 * ol_txrx_set_curchan - Setup the current operating channel of
2095 * the device
2096 * @pdev - the data physical device object
2097 * @chan_mhz - the channel frequency (mhz) packets on
2098 *
2099 * Mainly used when populating monitor mode status that requires
2100 * the current operating channel
2101 *
2102 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002103void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz)
2104{
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002105}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002106#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002107
2108void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
2109{
2110 vdev->safemode = val;
2111}
2112
Dhanashri Atre12a08392016-02-17 13:10:34 -08002113/**
2114 * ol_txrx_set_privacy_filters - set the privacy filter
2115 * @vdev - the data virtual device object
2116 * @filter - filters to be set
2117 * @num - the number of filters
2118 *
2119 * Rx related. Set the privacy filters. When rx packets, check
2120 * the ether type, filter type and packet type to decide whether
2121 * discard these packets.
2122 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002123static void
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002124ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
2125 void *filters, uint32_t num)
2126{
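	/*
	 * The copy below trusts the caller: num is expected to be no larger
	 * than the vdev's privacy_filters array; no bound check is done here.
	 */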
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302127 qdf_mem_copy(vdev->privacy_filters, filters,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002128 num * sizeof(struct privacy_exemption));
2129 vdev->num_filters = num;
2130}
2131
2132void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
2133{
2134 vdev->drop_unenc = val;
2135}
2136
gbian016a42e2017-03-01 18:49:11 +08002137#if defined(CONFIG_HL_SUPPORT)
2138
2139static void
2140ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2141{
2142 struct ol_txrx_pdev_t *pdev = vdev->pdev;
2143 int i;
2144 struct ol_tx_desc_t *tx_desc;
2145
2146 qdf_spin_lock_bh(&pdev->tx_mutex);
2147 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2148 tx_desc = ol_tx_desc_find(pdev, i);
2149 if (tx_desc->vdev == vdev)
2150 tx_desc->vdev = NULL;
2151 }
2152 qdf_spin_unlock_bh(&pdev->tx_mutex);
2153}
2154
2155#else
2156
2157static void
2158ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2159{
2160
2161}
2162
2163#endif
2164
Dhanashri Atre12a08392016-02-17 13:10:34 -08002165/**
2166 * ol_txrx_vdev_detach - Deallocate the specified data virtual
2167 * device object.
2168 * @data_vdev: data object for the virtual device in question
2169 * @callback: function to call (if non-NULL) once the vdev has
2170 * been wholly deleted
2171 * @callback_context: context to provide in the callback
2172 *
2173 * All peers associated with the virtual device need to be deleted
2174 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
2175 * However, for the peers to be fully deleted, the peer deletion has to
2176 * percolate through the target data FW and back up to the host data SW.
2177 * Thus, even though the host control SW may have issued a peer_detach
2178 * call for each of the vdev's peers, the peer objects may still be
2179 * allocated, pending removal of all references to them by the target FW.
2180 * In this case, though the vdev_detach function call will still return
2181 * immediately, the vdev itself won't actually be deleted, until the
2182 * deletions of all its peers complete.
2183 * The caller can provide a callback function pointer to be notified when
2184 * the vdev deletion actually happens - whether it's directly within the
2185 * vdev_detach call, or if it's deferred until all in-progress peer
2186 * deletions have completed.
2187 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002188static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002189ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002190 ol_txrx_vdev_delete_cb callback, void *context)
2191{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002192 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002193 struct ol_txrx_pdev_t *pdev = vdev->pdev;
2194
2195 /* preconditions */
2196 TXRX_ASSERT2(vdev);
2197
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302198 ol_txrx_vdev_tx_queue_free(vdev);
2199
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302200 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302201 qdf_timer_stop(&vdev->ll_pause.timer);
2202 qdf_timer_free(&vdev->ll_pause.timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002203 vdev->ll_pause.is_q_timer_on = false;
2204 while (vdev->ll_pause.txq.head) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302205 qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
Yun Parkeaea8632017-04-09 09:53:45 -07002206
Nirav Shahcbc6d722016-03-01 16:24:53 +05302207 qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
2208 qdf_nbuf_unmap(pdev->osdev, vdev->ll_pause.txq.head,
Anurag Chouhandf2b2682016-02-29 14:15:27 +05302209 QDF_DMA_TO_DEVICE);
Nirav Shahcbc6d722016-03-01 16:24:53 +05302210 qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002211 vdev->ll_pause.txq.head = next;
2212 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302213 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
2214 qdf_spinlock_destroy(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002215
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302216 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002217 vdev->osif_flow_control_cb = NULL;
2218 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302219 qdf_spin_unlock_bh(&vdev->flow_control_lock);
2220 qdf_spinlock_destroy(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002221
2222 /* remove the vdev from its parent pdev's list */
2223 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
2224
2225 /*
2226 * Use peer_ref_mutex while accessing peer_list, in case
2227 * a peer is in the process of being removed from the list.
2228 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302229 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002230 /* check that the vdev has no peers allocated */
2231 if (!TAILQ_EMPTY(&vdev->peer_list)) {
2232 /* debug print - will be removed later */
Poddar, Siddarth14521792017-03-14 21:19:42 +05302233 ol_txrx_dbg(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302234 "%s: not deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002235 __func__, vdev,
2236 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2237 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2238 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2239 /* indicate that the vdev needs to be deleted */
2240 vdev->delete.pending = 1;
2241 vdev->delete.callback = callback;
2242 vdev->delete.context = context;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302243 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002244 return;
2245 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302246 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002247 qdf_event_destroy(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002248
Poddar, Siddarth14521792017-03-14 21:19:42 +05302249 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002250 "%s: deleting vdev obj %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
2251 __func__, vdev,
2252 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2253 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2254 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2255
2256 htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
2257
2258 /*
Yun Parkeaea8632017-04-09 09:53:45 -07002259	 * ol_tx_desc_free() might access stale vdev contents referenced by a
 2260	 * tx desc, since this vdev might be detached asynchronously by
 2261	 * another thread.
2262 *
2263 * Go through tx desc pool to set corresponding tx desc's vdev to NULL
2264 * when detach this vdev, and add vdev checking in the ol_tx_desc_free
2265 * to avoid crash.
2266 *
2267 */
gbian016a42e2017-03-01 18:49:11 +08002268 ol_txrx_tx_desc_reset_vdev(vdev);
2269
2270 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002271 * Doesn't matter if there are outstanding tx frames -
2272 * they will be freed once the target sends a tx completion
2273 * message for them.
2274 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302275 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002276 if (callback)
2277 callback(context);
2278}
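/*
 * Illustrative call pattern (not taken from this file; my_vdev_deleted_cb
 * and my_ctx are hypothetical names):
 *
 *	ol_txrx_vdev_detach(vdev, my_vdev_deleted_cb, my_ctx);
 *
 * my_vdev_deleted_cb() runs synchronously from within the call when no
 * peer deletions are pending, or is deferred until all in-progress peer
 * deletions for this vdev have completed.
 */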
2279
2280/**
2281 * ol_txrx_flush_rx_frames() - flush cached rx frames
2282 * @peer: peer
2283 * @drop: set flag to drop frames
2284 *
2285 * Return: None
2286 */
2287void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302288 bool drop)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002289{
2290 struct ol_rx_cached_buf *cache_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302291 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002292 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002293
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302294 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
2295 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002296 return;
2297 }
2298
Dhanashri Atre182b0272016-02-17 15:35:07 -08002299 qdf_assert(peer->vdev);
2300
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302301 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre182b0272016-02-17 15:35:07 -08002302
Dhanashri Atre50141c52016-04-07 13:15:29 -07002303 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
Dhanashri Atre182b0272016-02-17 15:35:07 -08002304 data_rx = peer->vdev->rx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002305 else
2306 drop = true;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302307 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002308
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302309 qdf_spin_lock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002310 cache_buf = list_entry((&peer->cached_bufq)->next,
2311 typeof(*cache_buf), list);
2312 while (!list_empty(&peer->cached_bufq)) {
2313 list_del(&cache_buf->list);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302314 qdf_spin_unlock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002315 if (drop) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302316 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002317 } else {
2318 /* Flush the cached frames to HDD */
Dhanashri Atre182b0272016-02-17 15:35:07 -08002319 ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302320 if (ret != QDF_STATUS_SUCCESS)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302321 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002322 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302323 qdf_mem_free(cache_buf);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302324 qdf_spin_lock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002325 cache_buf = list_entry((&peer->cached_bufq)->next,
2326 typeof(*cache_buf), list);
2327 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302328 qdf_spin_unlock_bh(&peer->bufq_lock);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302329 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002330}
2331
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07002332static void ol_txrx_flush_cache_rx_queue(void)
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302333{
2334 uint8_t sta_id;
2335 struct ol_txrx_peer_t *peer;
2336 struct ol_txrx_pdev_t *pdev;
2337
2338 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
2339 if (!pdev)
2340 return;
2341
2342 for (sta_id = 0; sta_id < WLAN_MAX_STA_COUNT; sta_id++) {
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002343 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
2344 sta_id);
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302345 if (!peer)
2346 continue;
2347 ol_txrx_flush_rx_frames(peer, 1);
2348 }
2349}
2350
Dhanashri Atre12a08392016-02-17 13:10:34 -08002351/**
2352 * ol_txrx_peer_attach - Allocate and set up references for a
2353 * data peer object.
2354 * @data_pdev: data physical device object that will indirectly
2355 * own the data_peer object
2356 * @data_vdev - data virtual device object that will directly
2357 * own the data_peer object
2358 * @peer_mac_addr - MAC address of the new peer
2359 *
2360 * When an association with a peer starts, the host's control SW
2361 * uses this function to inform the host data SW.
2362 * The host data SW allocates its own peer object, and stores a
2363 * reference to the control peer object within the data peer object.
2364 * The host data SW also stores a reference to the virtual device
2365 * that the peer is associated with. This virtual device handle is
2366 * used when the data SW delivers rx data frames to the OS shim layer.
2367 * The host data SW returns a handle to the new peer data object,
2368 * so a reference within the control peer object can be set to the
2369 * data peer object.
2370 *
2371 * Return: handle to new data peer object, or NULL if the attach
2372 * fails
2373 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002374static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002375ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002376{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002377 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002378 struct ol_txrx_peer_t *peer;
2379 struct ol_txrx_peer_t *temp_peer;
2380 uint8_t i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002381 bool wait_on_deletion = false;
 2382	QDF_STATUS rc;
Dhanashri Atre12a08392016-02-17 13:10:34 -08002383 struct ol_txrx_pdev_t *pdev;
Abhishek Singh217d9782017-04-28 23:49:11 +05302384 bool cmp_wait_mac = false;
2385 uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002386
2387 /* preconditions */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002388 TXRX_ASSERT2(vdev);
2389 TXRX_ASSERT2(peer_mac_addr);
2390
Dhanashri Atre12a08392016-02-17 13:10:34 -08002391 pdev = vdev->pdev;
2392 TXRX_ASSERT2(pdev);
2393
Abhishek Singh217d9782017-04-28 23:49:11 +05302394 if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
2395 QDF_MAC_ADDR_SIZE))
2396 cmp_wait_mac = true;
2397
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302398 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002399	/* check for a duplicate existing peer */
2400 TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2401 if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
2402 (union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302403 ol_txrx_info_high(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002404			"vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exists.\n",
2405 vdev->vdev_id,
2406 peer_mac_addr[0], peer_mac_addr[1],
2407 peer_mac_addr[2], peer_mac_addr[3],
2408 peer_mac_addr[4], peer_mac_addr[5]);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302409 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002410 vdev->wait_on_peer_id = temp_peer->local_id;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002411 qdf_event_reset(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002412 wait_on_deletion = true;
Abhishek Singh217d9782017-04-28 23:49:11 +05302413 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002414 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302415 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002416 return NULL;
2417 }
2418 }
Abhishek Singh217d9782017-04-28 23:49:11 +05302419 if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
2420 &temp_peer->mac_addr,
2421 &vdev->last_peer_mac_addr)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302422 ol_txrx_info_high(
Abhishek Singh217d9782017-04-28 23:49:11 +05302423			"vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) old peer exists.\n",
2424 vdev->vdev_id,
2425 vdev->last_peer_mac_addr.raw[0],
2426 vdev->last_peer_mac_addr.raw[1],
2427 vdev->last_peer_mac_addr.raw[2],
2428 vdev->last_peer_mac_addr.raw[3],
2429 vdev->last_peer_mac_addr.raw[4],
2430 vdev->last_peer_mac_addr.raw[5]);
2431 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2432 vdev->wait_on_peer_id = temp_peer->local_id;
2433 qdf_event_reset(&vdev->wait_delete_comp);
2434 wait_on_deletion = true;
2435 break;
2436 } else {
2437 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2438 ol_txrx_err("peer not found");
2439 return NULL;
2440 }
2441 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002442 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302443 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002444
Abhishek Singh217d9782017-04-28 23:49:11 +05302445 qdf_mem_zero(&vdev->last_peer_mac_addr,
2446 sizeof(union ol_txrx_align_mac_addr_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002447 if (wait_on_deletion) {
2448 /* wait for peer deletion */
Anurag Chouhance0dc992016-02-16 18:18:03 +05302449 rc = qdf_wait_single_event(&vdev->wait_delete_comp,
Prakash Manjunathappad3ccca22016-05-05 19:23:19 -07002450 PEER_DELETION_TIMEOUT);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002451 if (QDF_STATUS_SUCCESS != rc) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302452 ol_txrx_err(
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002453 "error waiting for peer(%d) deletion, status %d\n",
2454 vdev->wait_on_peer_id, (int) rc);
Sandeep Puligillaaa61e432017-03-21 12:44:12 -07002455 if (cds_is_self_recovery_enabled())
2456 cds_trigger_recovery(false);
2457 else
2458 /* Added for debugging only */
2459 QDF_BUG(0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002460 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2461 return NULL;
2462 }
2463 }
2464
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302465 peer = qdf_mem_malloc(sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002466 if (!peer)
2467 return NULL; /* failure */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002468
2469 /* store provided params */
2470 peer->vdev = vdev;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302471 qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002472 OL_TXRX_MAC_ADDR_LEN);
2473
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302474 ol_txrx_peer_txqs_init(pdev, peer);
2475
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002476 INIT_LIST_HEAD(&peer->cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302477 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002478 /* add this peer into the vdev's list */
2479 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302480 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002481 /* check whether this is a real peer (peer mac addr != vdev mac addr) */
2482 if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr))
2483 vdev->last_real_peer = peer;
2484
2485 peer->rx_opt_proc = pdev->rx_opt_proc;
2486
2487 ol_rx_peer_init(pdev, peer);
2488
2489 /* initialize the peer_id */
2490 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2491 peer->peer_ids[i] = HTT_INVALID_PEER;
2492
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302493 qdf_spinlock_create(&peer->peer_info_lock);
2494 qdf_spinlock_create(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002495
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302496 qdf_atomic_init(&peer->delete_in_progress);
2497 qdf_atomic_init(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002498
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302499 qdf_atomic_init(&peer->ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002500
2501 /* keep one reference for attach */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302502 qdf_atomic_inc(&peer->ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002503
Prakash Dhavali0d3f1d62016-11-20 23:48:24 -08002504 /*
2505 * Set a flag to indicate peer create is pending in firmware and
2506 * increment ref_cnt so that peer will not get deleted while
2507 * peer create command is pending in firmware.
2508 * First peer_map event from firmware signifies successful
2509 * peer creation and it will be decremented in peer_map handling.
2510 */
2511 qdf_atomic_init(&peer->fw_create_pending);
2512 qdf_atomic_set(&peer->fw_create_pending, 1);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302513 qdf_atomic_inc(&peer->ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002514
2515 peer->valid = 1;
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07002516 qdf_timer_init(pdev->osdev, &peer->peer_unmap_timer,
2517 peer_unmap_timer_handler, peer, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002518
2519 ol_txrx_peer_find_hash_add(pdev, peer);
2520
Mohit Khanna47384bc2016-08-15 15:37:05 -07002521 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2522 "vdev %p created peer %p ref_cnt %d (%02x:%02x:%02x:%02x:%02x:%02x)\n",
2523 vdev, peer, qdf_atomic_read(&peer->ref_cnt),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002524 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2525 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2526 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2527 /*
 2528	 * For every peer map message, check and set bss_peer if applicable
2529 */
Ankit Guptaa5076012016-09-14 11:32:19 -07002530 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
2531 OL_TXRX_MAC_ADDR_LEN))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002532 peer->bss_peer = 1;
2533
2534 /*
2535 * The peer starts in the "disc" state while association is in progress.
2536 * Once association completes, the peer will get updated to "auth" state
2537 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2538 * or else to the "conn" state. For non-open mode, the peer will
2539 * progress to "auth" state once the authentication completes.
2540 */
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002541 peer->state = OL_TXRX_PEER_STATE_INVALID;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002542 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002543 OL_TXRX_PEER_STATE_DISC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002544
2545#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2546 peer->rssi_dbm = HTT_RSSI_INVALID;
2547#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002548 if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2549 !pdev->self_peer) {
2550 pdev->self_peer = peer;
2551 /*
2552 * No Tx in monitor mode, otherwise results in target assert.
2553 * Setting disable_intrabss_fwd to true
2554 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002555 ol_vdev_rx_set_intrabss_fwd((struct cdp_vdev *)vdev, true);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002556 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002557
2558 ol_txrx_local_peer_id_alloc(pdev, peer);
2559
Leo Chang98726762016-10-28 11:07:18 -07002560 return (void *)peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002561}
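/*
 * Note on the attach path above (a summary of the inline comments, not a
 * change in behavior): a freshly attached peer holds two references - one
 * taken for the attach itself, released later by ol_txrx_peer_detach(),
 * and one taken while peer create is pending in firmware, released when
 * the first peer_map event is handled. The hash-table entry added by
 * ol_txrx_peer_find_hash_add() is what lets later lookups (for example
 * ol_txrx_peer_find_hash_find(), as used by ol_txrx_peer_state_update()
 * below) take their own temporary references.
 */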
2562
2563/*
2564 * Discarding tx filter - removes all data frames (disconnected state)
2565 */
2566static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2567{
2568 return A_ERROR;
2569}
2570
2571/*
2572 * Non-authentication tx filter - filters out data frames that are not
2573 * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2574 * data frames (connected state)
2575 */
2576static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2577{
2578 return
2579 (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2580 tx_msdu_info->htt.info.ethertype ==
2581 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2582}
2583
2584/*
2585 * Pass-through tx filter - lets all data frames through (authenticated state)
2586 */
2587static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2588{
2589 return A_OK;
2590}
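/*
 * Note: the three filters above are installed as peer->tx_filter by
 * ol_txrx_peer_state_update() later in this file:
 *   OL_TXRX_PEER_STATE_DISC -> ol_tx_filter_discard   (drop everything)
 *   OL_TXRX_PEER_STATE_CONN -> ol_tx_filter_non_auth  (EAPOL/WAPI only)
 *   OL_TXRX_PEER_STATE_AUTH -> ol_tx_filter_pass_thru (pass everything)
 */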
2591
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002592/**
2593 * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2594 * @peer: handle to peer
2595 *
2596 * Returns the MAC address, for modules that do not know the peer type
2597 *
2598 * Return: the mac_addr from peer
2599 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002600static uint8_t *
Leo Chang98726762016-10-28 11:07:18 -07002601ol_txrx_peer_get_peer_mac_addr(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002602{
Leo Chang98726762016-10-28 11:07:18 -07002603 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002604 if (!peer)
2605 return NULL;
2606
2607 return peer->mac_addr.raw;
2608}
2609
Abhishek Singhcfb44482017-03-10 12:42:37 +05302610#ifdef WLAN_FEATURE_11W
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002611/**
2612 * ol_txrx_get_pn_info() - Returns pn info from peer
2613 * @peer: handle to peer
2614 * @last_pn_valid: return last_rmf_pn_valid value from peer.
2615 * @last_pn: return last_rmf_pn value from peer.
2616 * @rmf_pn_replays: return rmf_pn_replays value from peer.
2617 *
2618 * Return: NONE
2619 */
2620void
Leo Chang98726762016-10-28 11:07:18 -07002621ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002622 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2623{
Leo Chang98726762016-10-28 11:07:18 -07002624 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002625 *last_pn_valid = &peer->last_rmf_pn_valid;
2626 *last_pn = &peer->last_rmf_pn;
2627 *rmf_pn_replays = &peer->rmf_pn_replays;
2628}
Abhishek Singhcfb44482017-03-10 12:42:37 +05302629#else
2630void
2631ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
2632 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2633{
2634}
2635#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002636
2637/**
2638 * ol_txrx_get_opmode() - Return operation mode of vdev
2639 * @vdev: vdev handle
2640 *
2641 * Return: operation mode.
2642 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002643static int ol_txrx_get_opmode(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002644{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002645 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002646 return vdev->opmode;
2647}
2648
2649/**
2650 * ol_txrx_get_peer_state() - Return peer state of peer
2651 * @peer: peer handle
2652 *
2653 * Return: return peer state
2654 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002655static int ol_txrx_get_peer_state(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002656{
Leo Chang98726762016-10-28 11:07:18 -07002657 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002658 return peer->state;
2659}
2660
2661/**
2662 * ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
2663 * @peer: peer handle
2664 *
2665 * Return: vdev handle from peer
2666 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002667static struct cdp_vdev *ol_txrx_get_vdev_for_peer(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002668{
Leo Chang98726762016-10-28 11:07:18 -07002669 ol_txrx_peer_handle peer = ppeer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002670 return (struct cdp_vdev *)peer->vdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002671}
2672
2673/**
2674 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
2675 * @vdev: vdev handle
2676 *
2677 * Return: vdev mac address
2678 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002679static uint8_t *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002680ol_txrx_get_vdev_mac_addr(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002681{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002682 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002683 if (!vdev)
2684 return NULL;
2685
2686 return vdev->mac_addr.raw;
2687}
2688
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002689#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002690/**
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002691 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002692 * vdev
2693 * @vdev: vdev handle
2694 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002695 * Return: Handle to struct qdf_mac_addr
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002696 */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002697struct qdf_mac_addr *
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002698ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
2699{
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002700 return (struct qdf_mac_addr *)&(vdev->mac_addr);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002701}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002702#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002703
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002704#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002705/**
2706 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
2707 * @vdev: vdev handle
2708 *
2709 * Return: Handle to pdev
2710 */
2711ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
2712{
2713 return vdev->pdev;
2714}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002715#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002716
2717/**
2718 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
2719 * @vdev: vdev handle
2720 *
2721 * Return: Handle to control pdev
2722 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002723static struct cdp_cfg *
2724ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002725{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002726 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
2727 return vdev->pdev->ctrl_pdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002728}
2729
2730/**
2731 * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
2732 * @vdev: vdev handle
2733 *
2734 * Return: Rx Fwd disabled status
2735 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002736static uint8_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002737ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002738{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002739 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002740 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
2741 vdev->pdev->ctrl_pdev;
2742 return cfg->rx_fwd_disabled;
2743}
2744
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002745#ifdef QCA_IBSS_SUPPORT
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002746/**
2747 * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
2748 * @vdev: vdev handle
2749 * @peer_num_delta: number of peers to add (or remove, if negative)
2750 *
2751 * Return: OL_TXRX_INVALID_NUM_PEERS on failure, or the total peer count after adjustment.
2752 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002753static int16_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002754ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002755 int16_t peer_num_delta)
2756{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002757 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002758 int16_t new_peer_num;
2759
2760 new_peer_num = vdev->ibss_peer_num + peer_num_delta;
Naveen Rawatc45d1622016-07-05 12:20:09 -07002761 if (new_peer_num > MAX_PEERS || new_peer_num < 0)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002762 return OL_TXRX_INVALID_NUM_PEERS;
2763
2764 vdev->ibss_peer_num = new_peer_num;
2765
2766 return new_peer_num;
2767}
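/*
 * Usage sketch (hypothetical caller, for illustration only): pass a
 * positive delta when an IBSS peer joins and a negative one when it
 * leaves, and treat OL_TXRX_INVALID_NUM_PEERS as "could not adjust":
 *
 *   if (ol_txrx_update_ibss_add_peer_num_of_vdev(pvdev, 1) ==
 *       OL_TXRX_INVALID_NUM_PEERS)
 *           reject the join - count would exceed MAX_PEERS or underflow
 */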
2768
2769/**
2770 * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
2771 * beat timer
2772 * @vdev: vdev handle
2773 * @timer_value_sec: new heart beat timer value
2774 *
2775 * Return: Old timer value set in vdev.
2776 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002777static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
2778 uint16_t timer_value_sec)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002779{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002780 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002781 uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
2782
2783 vdev->ibss_peer_heart_beat_timer = timer_value_sec;
2784
2785 return old_timer_value;
2786}
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002787#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002788
2789/**
2790 * ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
2791 * @vdev: vdev handle
2792 * @callback: callback function to remove the peer.
2793 * @callback_context: handle for callback function
2794 * @remove_last_peer: whether the last (self/bss) peer should also be removed
2795 *
2796 * Return: NONE
2797 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002798static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002799ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002800 ol_txrx_vdev_peer_remove_cb callback,
2801 void *callback_context, bool remove_last_peer)
2802{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002803 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002804 ol_txrx_peer_handle peer, temp;
2805 /* remove all remote peers for vdev */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002806 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002807
2808 temp = NULL;
2809 TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
2810 peer_list_elem) {
2811 if (temp) {
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002812 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
2813 if (qdf_atomic_read(&temp->delete_in_progress) == 0) {
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002814 callback(callback_context, temp->mac_addr.raw,
2815 vdev->vdev_id, temp, false);
2816 }
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002817 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002818 }
2819 /* self peer is deleted last */
2820 if (peer == TAILQ_FIRST(&vdev->peer_list)) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302821 ol_txrx_info_high(
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002822 "%s: self peer removed by caller ",
2823 __func__);
2824 break;
Yun Parkeaea8632017-04-09 09:53:45 -07002825 }
2826 temp = peer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002827 }
2828
Mohit Khanna137b97d2016-04-21 16:11:33 -07002829 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
2830
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002831 if (remove_last_peer) {
2832 /* remove IBSS bss peer last */
2833 peer = TAILQ_FIRST(&vdev->peer_list);
2834 callback(callback_context, (uint8_t *) &vdev->mac_addr,
2835 vdev->vdev_id, peer, false);
2836 }
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002837}
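/*
 * Note on the removal loop above: the peer_ref_mutex is intentionally
 * dropped around each callback invocation and re-acquired before the
 * next list step, and the self (bss) peer - the first entry in the
 * vdev's peer list - is skipped inside the loop so that it can be
 * removed last via the remove_last_peer handling at the end.
 */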
2838
2839/**
2840 * ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
2841 * @vdev: vdev handle
2842 * @callback: callback function to remove the peer.
2843 * @callback_context: handle for callback function
2844 *
2845 * Return: NONE
2846 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002847static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002848ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002849 ol_txrx_vdev_peer_remove_cb callback,
2850 void *callback_context)
2851{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002852 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002853 ol_txrx_peer_handle peer = NULL;
2854
2855 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05302856 ol_txrx_info_high(
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002857 "%s: peer found for vdev id %d. deleting the peer",
2858 __func__, vdev->vdev_id);
2859 callback(callback_context, (uint8_t *)&vdev->mac_addr,
2860 vdev->vdev_id, peer, false);
2861 }
2862}
2863
2864/**
2865 * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
2866 * @vdev: vdev handle
2867 * @ocb_set_chan: OCB channel information to be set in vdev.
2868 *
2869 * Return: NONE
2870 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002871static void ol_txrx_set_ocb_chan_info(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002872 struct ol_txrx_ocb_set_chan ocb_set_chan)
2873{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002874 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002875 vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
2876 vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
2877}
2878
2879/**
2880 * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
2881 * @vdev: vdev handle
2882 *
2883 * Return: handle to struct ol_txrx_ocb_chan_info
2884 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002885static struct ol_txrx_ocb_chan_info *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002886ol_txrx_get_ocb_chan_info(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002887{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002888 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002889 return vdev->ocb_channel_info;
2890}
2891
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002892/**
2893 * @brief specify the peer's authentication state
2894 * @details
2895 * Specify the peer's authentication state (none, connected, authenticated)
2896 * to allow the data SW to determine whether to filter out invalid data frames.
2897 * (In the "connected" state, where security is enabled, but authentication
2898 * has not completed, tx and rx data frames other than EAPOL or WAPI should
2899 * be discarded.)
2900 * This function is only relevant for systems in which the tx and rx filtering
2901 * are done in the host rather than in the target.
2902 *
2903 * @param peer_mac - MAC address of the peer whose state has changed
2904 * @param state - the new state of the peer
2905 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002906 * Return: QDF Status
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002907 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002908QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002909 uint8_t *peer_mac,
2910 enum ol_txrx_peer_state state)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002911{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002912 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002913 struct ol_txrx_peer_t *peer;
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002914 int peer_ref_cnt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002915
Anurag Chouhanc5548422016-02-24 18:33:27 +05302916 if (qdf_unlikely(!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302917 ol_txrx_err("Pdev is NULL");
Anurag Chouhanc5548422016-02-24 18:33:27 +05302918 qdf_assert(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302919 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002920 }
2921
2922 peer = ol_txrx_peer_find_hash_find(pdev, peer_mac, 0, 1);
2923 if (NULL == peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05302924 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302925 "%s: peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
2926 __func__,
2927 peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
2928 peer_mac[4], peer_mac[5]);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302929 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002930 }
2931
2932 /* TODO: Should we send WMI command of the connection state? */
2933 /* avoid repeated auth state changes */
2934 if (peer->state == state) {
2935#ifdef TXRX_PRINT_VERBOSE_ENABLE
Poddar, Siddarth14521792017-03-14 21:19:42 +05302936 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002937 "%s: no state change, returns directly\n",
2938 __func__);
2939#endif
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002940 peer_ref_cnt = ol_txrx_peer_unref_delete(peer);
Mohit Khanna47384bc2016-08-15 15:37:05 -07002941 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002942 "%s: peer %p peer->ref_cnt %d",
2943 __func__, peer, peer_ref_cnt);
2944
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302945 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002946 }
2947
Poddar, Siddarth14521792017-03-14 21:19:42 +05302948 ol_txrx_dbg("%s: change from %d to %d\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002949 __func__, peer->state, state);
2950
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002951 peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002952 ? ol_tx_filter_pass_thru
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002953 : ((state == OL_TXRX_PEER_STATE_CONN)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002954 ? ol_tx_filter_non_auth
2955 : ol_tx_filter_discard);
2956
2957 if (peer->vdev->pdev->cfg.host_addba) {
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002958 if (state == OL_TXRX_PEER_STATE_AUTH) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002959 int tid;
2960 /*
2961 * Pause all regular (non-extended) TID tx queues until
2962 * data arrives and ADDBA negotiation has completed.
2963 */
Poddar, Siddarth14521792017-03-14 21:19:42 +05302964 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002965 "%s: pause peer and unpause mgmt/non-qos\n",
2966 __func__);
2967 ol_txrx_peer_pause(peer); /* pause all tx queues */
2968 /* unpause mgmt and non-QoS tx queues */
2969 for (tid = OL_TX_NUM_QOS_TIDS;
2970 tid < OL_TX_NUM_TIDS; tid++)
2971 ol_txrx_peer_tid_unpause(peer, tid);
2972 }
2973 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002974 peer_ref_cnt = ol_txrx_peer_unref_delete(peer);
Mohit Khanna47384bc2016-08-15 15:37:05 -07002975 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08002976 "%s: peer %p peer->ref_cnt %d",
2977 __func__, peer, peer_ref_cnt);
2978 /*
2979 * after ol_txrx_peer_unref_delete, peer object cannot be accessed
2980 * if the return code was 0
2981 */
2982 if (peer_ref_cnt)
2983 /*
2984 * Set the state after the pause to avoid the race condition
2985 * with ADDBA check in tx path
2986 */
2987 peer->state = state;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302988 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002989}
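/*
 * Illustrative caller sketch (not driver code; the function and variable
 * names are hypothetical). It shows the typical state progression
 * described in the comment block above: a peer on a secured BSS moves
 * from DISC to CONN at association and to AUTH once the key handshake
 * completes, which swaps peer->tx_filter accordingly.
 */
#if 0
static void example_secure_assoc_flow(struct cdp_pdev *ppdev,
				      uint8_t *peer_mac)
{
	/* association completed, security enabled: only EAPOL/WAPI pass */
	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_CONN);

	/* key handshake completed: all data frames pass */
	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_AUTH);
}
#endif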
2990
2991void
2992ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
2993{
2994 peer->keyinstalled = val;
2995}
2996
2997void
2998ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
2999 uint8_t *peer_mac,
3000 union ol_txrx_peer_update_param_t *param,
3001 enum ol_txrx_peer_update_select_t select)
3002{
3003 struct ol_txrx_peer_t *peer;
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003004 int peer_ref_cnt;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003005
3006 peer = ol_txrx_peer_find_hash_find(vdev->pdev, peer_mac, 0, 1);
3007 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303008 ol_txrx_dbg("%s: peer is null",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003009 __func__);
3010 return;
3011 }
3012
3013 switch (select) {
3014 case ol_txrx_peer_update_qos_capable:
3015 {
3016 /* save qos_capable here in the txrx peer;
3017 * when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO comes, save it then.
3018 */
3019 peer->qos_capable = param->qos_capable;
3020 /*
3021 * The following function call assumes that the peer has a
3022 * single ID. This is currently true, and
3023 * is expected to remain true.
3024 */
3025 htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
3026 peer->peer_ids[0],
3027 peer->qos_capable);
3028 break;
3029 }
3030 case ol_txrx_peer_update_uapsdMask:
3031 {
3032 peer->uapsd_mask = param->uapsd_mask;
3033 htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
3034 peer->peer_ids[0],
3035 peer->uapsd_mask);
3036 break;
3037 }
3038 case ol_txrx_peer_update_peer_security:
3039 {
3040 enum ol_sec_type sec_type = param->sec_type;
3041 enum htt_sec_type peer_sec_type = htt_sec_type_none;
3042
3043 switch (sec_type) {
3044 case ol_sec_type_none:
3045 peer_sec_type = htt_sec_type_none;
3046 break;
3047 case ol_sec_type_wep128:
3048 peer_sec_type = htt_sec_type_wep128;
3049 break;
3050 case ol_sec_type_wep104:
3051 peer_sec_type = htt_sec_type_wep104;
3052 break;
3053 case ol_sec_type_wep40:
3054 peer_sec_type = htt_sec_type_wep40;
3055 break;
3056 case ol_sec_type_tkip:
3057 peer_sec_type = htt_sec_type_tkip;
3058 break;
3059 case ol_sec_type_tkip_nomic:
3060 peer_sec_type = htt_sec_type_tkip_nomic;
3061 break;
3062 case ol_sec_type_aes_ccmp:
3063 peer_sec_type = htt_sec_type_aes_ccmp;
3064 break;
3065 case ol_sec_type_wapi:
3066 peer_sec_type = htt_sec_type_wapi;
3067 break;
3068 default:
3069 peer_sec_type = htt_sec_type_none;
3070 break;
3071 }
3072
3073 peer->security[txrx_sec_ucast].sec_type =
3074 peer->security[txrx_sec_mcast].sec_type =
3075 peer_sec_type;
3076
3077 break;
3078 }
3079 default:
3080 {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303081 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003082 "ERROR: unknown param %d in %s", select,
3083 __func__);
3084 break;
3085 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003086 } /* switch */
3087 peer_ref_cnt = ol_txrx_peer_unref_delete(peer);
Mohit Khanna47384bc2016-08-15 15:37:05 -07003088 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003089 "%s: peer %p peer->ref_cnt %d",
3090 __func__, peer, peer_ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003091}
3092
3093uint8_t
3094ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3095{
3096
3097 struct ol_txrx_peer_t *peer;
Yun Parkeaea8632017-04-09 09:53:45 -07003098
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003099 peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3100 if (peer)
3101 return peer->uapsd_mask;
3102 return 0;
3103}
3104
3105uint8_t
3106ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3107{
3108
3109 struct ol_txrx_peer_t *peer_t =
3110 ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3111 if (peer_t != NULL)
3112 return peer_t->qos_capable;
3113 return 0;
3114}
3115
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003116int ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003117{
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003118 int rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003119 struct ol_txrx_vdev_t *vdev;
3120 struct ol_txrx_pdev_t *pdev;
3121 int i;
3122
3123 /* preconditions */
3124 TXRX_ASSERT2(peer);
3125
3126 vdev = peer->vdev;
3127 if (NULL == vdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303128 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003129 "The vdev is not present anymore\n");
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003130 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003131 }
3132
3133 pdev = vdev->pdev;
3134 if (NULL == pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303135 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003136 "The pdev is not present anymore\n");
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003137 return -EINVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003138 }
3139
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003140
3141 /*
3142 * Hold the lock all the way from checking if the peer ref count
3143 * is zero until the peer references are removed from the hash
3144 * table and vdev list (if the peer ref count is zero).
3145 * This protects against a new HL tx operation starting to use the
3146 * peer object just after this function concludes that it is done being used.
3147 * Furthermore, the lock needs to be held while checking whether the
3148 * vdev's list of peers is empty, to make sure that list is not modified
3149 * concurrently with the empty check.
3150 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303151 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003152
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003153 /*
3154 * Check the reference count before deleting the peer,
3155 * as we have noticed that this function is sometimes
3156 * re-entered, which can lead to a dead-lock.
3157 * (A double-free should never happen, so assert if it does.)
3158 */
3159 rc = qdf_atomic_read(&(peer->ref_cnt));
3160
3161 if (rc == 0) {
3162 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3163 ol_txrx_err("The Peer is not present anymore\n");
3164 qdf_assert(0);
3165 return -EACCES;
3166 }
3167 /*
3168 * now decrement rc; this will be the return code.
3169 * 0 : peer deleted
3170 * >0: peer ref removed, but still has other references
3171 * <0: sanity failed - no changes to the state of the peer
3172 */
3173 rc--;
3174
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07003175 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003176 u_int16_t peer_id;
3177
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003178 peer_id = peer->local_id;
3179 /* remove the reference to the peer from the hash table */
3180 ol_txrx_peer_find_hash_remove(pdev, peer);
3181
3182 /* remove the peer from its parent vdev's list */
3183 TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
3184
3185 /* cleanup the Rx reorder queues for this peer */
3186 ol_rx_peer_cleanup(vdev, peer);
3187
3188 /* peer is removed from peer_list */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303189 qdf_atomic_set(&peer->delete_in_progress, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003190
3191 /*
3192 * Set wait_delete_comp event if the current peer id matches
3193 * with registered peer id.
3194 */
3195 if (peer_id == vdev->wait_on_peer_id) {
Anurag Chouhance0dc992016-02-16 18:18:03 +05303196 qdf_event_set(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003197 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3198 }
3199
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07003200 qdf_timer_sync_cancel(&peer->peer_unmap_timer);
3201 qdf_timer_free(&peer->peer_unmap_timer);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003202
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003203 /* check whether the parent vdev has no peers left */
3204 if (TAILQ_EMPTY(&vdev->peer_list)) {
3205 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003206 * Check if the parent vdev was waiting for its peers
3207 * to be deleted, in order for it to be deleted too.
3208 */
3209 if (vdev->delete.pending) {
3210 ol_txrx_vdev_delete_cb vdev_delete_cb =
3211 vdev->delete.callback;
3212 void *vdev_delete_context =
3213 vdev->delete.context;
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303214 /*
3215 * Now that there are no references to the peer,
3216 * we can release the peer reference lock.
3217 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303218 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303219
gbian016a42e2017-03-01 18:49:11 +08003220 /*
Yun Parkeaea8632017-04-09 09:53:45 -07003221 * ol_tx_desc_free might access invalid vdev
3222 * content referenced by a tx desc, since this
3223 * vdev might be detached asynchronously in
3224 * another thread.
3225 *
3226 * So go through the tx desc pool and set each
3227 * corresponding tx desc's vdev to NULL when detaching
3228 * this vdev, and add a vdev check in ol_tx_desc_free
3229 * to avoid a crash.
3230 */
gbian016a42e2017-03-01 18:49:11 +08003231 ol_txrx_tx_desc_reset_vdev(vdev);
Poddar, Siddarth14521792017-03-14 21:19:42 +05303232 ol_txrx_dbg(
Yun Parkeaea8632017-04-09 09:53:45 -07003233 "%s: deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x) - its last peer is done",
3234 __func__, vdev,
3235 vdev->mac_addr.raw[0],
3236 vdev->mac_addr.raw[1],
3237 vdev->mac_addr.raw[2],
3238 vdev->mac_addr.raw[3],
3239 vdev->mac_addr.raw[4],
3240 vdev->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003241 /* all peers are gone, go ahead and delete it */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303242 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003243 if (vdev_delete_cb)
3244 vdev_delete_cb(vdev_delete_context);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303245 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303246 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003247 }
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303248 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303249 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303250 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003251
Varun Reddy Yeturudd51e8d2017-05-14 14:51:13 -07003252 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3253 "%s: Deleting peer %p (%pM) ref_cnt = %d",
3254 __func__, peer, peer->mac_addr.raw,
3255 qdf_atomic_read(&peer->ref_cnt));
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303256 ol_txrx_peer_tx_queue_free(pdev, peer);
3257
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003258 /*
3259 * 'array' is allocated in addba handler and is supposed to be
3260 * freed in delba handler. There is the case (for example, in
3261 * SSR) where delba handler is not called. Because array points
3262 * to address of 'base' by default and is reallocated in addba
3263 * handler later, only free the memory when the array does not
3264 * point to base.
3265 */
3266 for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
3267 if (peer->tids_rx_reorder[i].array !=
3268 &peer->tids_rx_reorder[i].base) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303269 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003270 "%s, delete reorder arr, tid:%d\n",
3271 __func__, i);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303272 qdf_mem_free(peer->tids_rx_reorder[i].array);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003273 ol_rx_reorder_init(&peer->tids_rx_reorder[i],
3274 (uint8_t) i);
3275 }
3276 }
3277
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303278 qdf_mem_free(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003279 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303280 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Varun Reddy Yeturudd51e8d2017-05-14 14:51:13 -07003281 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003282 "%s: peer %p peer->ref_cnt = %d",
3283 __func__, peer, rc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003284 }
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -08003285
3286 return rc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003287}
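/*
 * Illustrative caller pattern (a sketch, not driver code): the return
 * value tells the caller whether the peer object may still be touched,
 * as ol_txrx_peer_state_update() above relies on:
 *
 *   ref = ol_txrx_peer_unref_delete(peer);
 *   if (ref > 0)
 *       peer->state = state;    other references remain, peer still valid
 *   ref == 0: last reference dropped, the peer memory has been freed
 *   ref < 0:  sanity failure, peer state left unchanged
 */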
3288
Dhanashri Atre12a08392016-02-17 13:10:34 -08003289/**
Mohit Khanna0696eef2016-04-14 16:14:08 -07003290 * ol_txrx_clear_peer_internal() - ol internal function to clear peer
3291 * @peer: pointer to ol txrx peer structure
3292 *
3293 * Return: QDF Status
3294 */
3295static QDF_STATUS
3296ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
3297{
3298 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3299 /* Drop pending Rx frames in CDS */
3300 if (sched_ctx)
3301 cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3302
3303 /* Purge the cached rx frame queue */
3304 ol_txrx_flush_rx_frames(peer, 1);
3305
3306 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003307 peer->state = OL_TXRX_PEER_STATE_DISC;
3308 qdf_spin_unlock_bh(&peer->peer_info_lock);
3309
3310 return QDF_STATUS_SUCCESS;
3311}
3312
3313/**
3314 * ol_txrx_clear_peer() - clear peer
3315 * @sta_id: sta id
3316 *
3317 * Return: QDF Status
3318 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003319static QDF_STATUS ol_txrx_clear_peer(struct cdp_pdev *ppdev, uint8_t sta_id)
Mohit Khanna0696eef2016-04-14 16:14:08 -07003320{
3321 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003322 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003323
3324 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303325 ol_txrx_err("%s: Unable to find pdev!",
Mohit Khanna0696eef2016-04-14 16:14:08 -07003326 __func__);
3327 return QDF_STATUS_E_FAILURE;
3328 }
3329
3330 if (sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303331 ol_txrx_err("Invalid sta id %d", sta_id);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003332 return QDF_STATUS_E_INVAL;
3333 }
3334
3335
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003336 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003337 if (!peer)
3338 return QDF_STATUS_E_FAULT;
3339
3340 return ol_txrx_clear_peer_internal(peer);
3341
3342}
3343
3344/**
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003345 * peer_unmap_timer_handler() - peer unmap timer handler
3346 * @data: peer object pointer
3347 *
3348 * Return: none
3349 */
3350void peer_unmap_timer_handler(void *data)
3351{
3352 ol_txrx_peer_handle peer = (ol_txrx_peer_handle)data;
3353
3354 ol_txrx_err("all unmap events not received for peer %p, ref_cnt %d",
3355 peer, qdf_atomic_read(&peer->ref_cnt));
3356 ol_txrx_err("peer %p (%02x:%02x:%02x:%02x:%02x:%02x)",
3357 peer,
3358 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3359 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3360 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Deepak Dhamdhered42ab7c2017-04-13 19:32:16 -07003361 if (!cds_is_driver_recovering()) {
3362 /*
3363 * Add the equivalent of wma_peer_debug_dump() when available.
3364 */
3365 QDF_BUG(0);
3366 } else {
3367 ol_txrx_err("Recovery is in progress, ignore!");
3368 }
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003369}
3370
3371
3372/**
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003373 * ol_txrx_peer_detach() - Delete a peer's data object.
3374 * @peer - the object to detach
Dhanashri Atre12a08392016-02-17 13:10:34 -08003375 *
3376 * When the host's control SW disassociates a peer, it calls
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003377 * this function to detach and delete the peer. The reference
Dhanashri Atre12a08392016-02-17 13:10:34 -08003378 * stored in the control peer object to the data peer
3379 * object (set up by a call to ol_peer_store()) is provided.
3380 *
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003381 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08003382 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003383static void ol_txrx_peer_detach(void *ppeer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003384{
Leo Chang98726762016-10-28 11:07:18 -07003385 ol_txrx_peer_handle peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003386 struct ol_txrx_vdev_t *vdev = peer->vdev;
3387
3388 /* redirect peer's rx delivery function to point to a discard func */
3389 peer->rx_opt_proc = ol_rx_discard;
3390
3391 peer->valid = 0;
3392
Mohit Khanna0696eef2016-04-14 16:14:08 -07003393 /* flush all rx packets before clearing up the peer local_id */
3394 ol_txrx_clear_peer_internal(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003395 ol_txrx_local_peer_id_free(peer->vdev->pdev, peer);
3396
3397 /* debug print to dump rx reorder state */
3398 /* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
3399
Poddar, Siddarth14521792017-03-14 21:19:42 +05303400 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Houston Hoffman43d47fa2016-02-24 16:34:30 -08003401 "%s:peer %p (%02x:%02x:%02x:%02x:%02x:%02x)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003402 __func__, peer,
3403 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3404 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3405 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003406
3407 if (peer->vdev->last_real_peer == peer)
3408 peer->vdev->last_real_peer = NULL;
3409
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303410 qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003411 if (vdev->last_real_peer == peer)
3412 vdev->last_real_peer = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303413 qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003414 htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
3415
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303416 qdf_spinlock_destroy(&peer->peer_info_lock);
3417 qdf_spinlock_destroy(&peer->bufq_lock);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003418 /*
3419 * set delete_in_progress to identify that wma
3420 * is waiting for the unmap message for this peer
3421 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303422 qdf_atomic_set(&peer->delete_in_progress, 1);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003423
Abhishek Singh217d9782017-04-28 23:49:11 +05303424 if (vdev->opmode == wlan_op_mode_sta) {
3425 qdf_mem_copy(&peer->vdev->last_peer_mac_addr,
3426 &peer->mac_addr,
3427 sizeof(union ol_txrx_align_mac_addr_t));
3428 }
3429
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003430 /*
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003431 * Create a timer to track unmap events when the sta peer gets deleted.
3432 */
3433 if (vdev->opmode == wlan_op_mode_sta) {
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07003434 qdf_timer_start(&peer->peer_unmap_timer,
3435 OL_TXRX_PEER_UNMAP_TIMEOUT);
Kapil Gupta53d9b572017-06-28 17:53:25 +05303436 ol_txrx_info_high("%s: started peer_unmap_timer for peer %p",
Deepak Dhamdhere2b283c62017-03-30 17:51:53 -07003437 __func__, peer);
Deepak Dhamdheree1c2e212017-01-13 01:54:02 -08003438 }
3439
3440 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003441 * Remove the reference added during peer_attach.
3442 * The peer will still be left allocated until the
3443 * PEER_UNMAP message arrives to remove the other
3444 * reference, added by the PEER_MAP message.
3445 */
3446 ol_txrx_peer_unref_delete(peer);
3447}
3448
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003449/**
3450 * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
3451 * @peer - the object to detach
3452 *
3453 * Detach a peer and force the peer object to be removed. It is called during
3454 * roaming scenario when the firmware has already deleted a peer.
3455 * Peer object is freed immediately to avoid duplicate peers during roam sync
3456 * indication processing.
3457 *
3458 * Return: None
3459 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003460static void ol_txrx_peer_detach_force_delete(void *ppeer)
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003461{
Leo Chang98726762016-10-28 11:07:18 -07003462 ol_txrx_peer_handle peer = ppeer;
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003463 ol_txrx_pdev_handle pdev = peer->vdev->pdev;
3464
Kapil Gupta53d9b572017-06-28 17:53:25 +05303465 ol_txrx_info_high("%s peer %p, peer->ref_cnt %d",
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003466 __func__, peer, qdf_atomic_read(&peer->ref_cnt));
3467
3468 /* Clear the peer_id_to_obj map entries */
3469 qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
3470 ol_txrx_peer_remove_obj_map_entries(pdev, peer);
3471 qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
3472
3473 /*
3474 * Set ref_cnt = 1 so that ol_txrx_peer_unref_delete() called by
3475 * ol_txrx_peer_detach() will actually delete this peer entry properly.
3476 */
3477 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
3478 qdf_atomic_set(&peer->ref_cnt, 1);
3479 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3480
3481 ol_txrx_peer_detach(peer);
3482}
3483
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003484ol_txrx_peer_handle
3485ol_txrx_peer_find_by_addr(struct ol_txrx_pdev_t *pdev, uint8_t *peer_mac_addr)
3486{
3487 struct ol_txrx_peer_t *peer;
Yun Parkeaea8632017-04-09 09:53:45 -07003488
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003489 peer = ol_txrx_peer_find_hash_find(pdev, peer_mac_addr, 0, 0);
3490 if (peer) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05303491 ol_txrx_info_high(
Houston Hoffman43d47fa2016-02-24 16:34:30 -08003492 "%s: Delete extra reference %p", __func__, peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003493 /* release the extra reference */
3494 ol_txrx_peer_unref_delete(peer);
3495 }
3496 return peer;
3497}
3498
3499/**
3500 * ol_txrx_dump_tx_desc() - dump tx desc total and free count
3501 * @txrx_pdev: Pointer to txrx pdev
3502 *
3503 * Return: none
3504 */
3505static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3506{
3507 struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003508 uint32_t total, num_free;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003509
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303510 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3511 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3512 else
3513 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003514
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003515 num_free = ol_tx_get_total_free_desc(pdev);
3516
Kapil Gupta53d9b572017-06-28 17:53:25 +05303517 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303518 "total tx credit %d num_free %d",
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003519 total, num_free);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003520
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003521}
3522
3523/**
3524 * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3525 * @timeout: timeout in ms
3526 *
3527 * Wait for tx queue to be empty, return timeout error if
3528 * queue doesn't empty before timeout occurs.
3529 *
3530 * Return:
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303531 * QDF_STATUS_SUCCESS if the queue empties,
3532 * QDF_STATUS_E_TIMEOUT in case of timeout,
3533 * QDF_STATUS_E_FAULT in case of missing handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003534 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003535static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003536{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003537 struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003538
3539 if (txrx_pdev == NULL) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303540 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003541 "%s: txrx context is null", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303542 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003543 }
3544
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003545 while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
Anurag Chouhan512c7d52016-02-19 15:49:46 +05303546 qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003547 if (timeout <= 0) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05303548 ol_txrx_err(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303549 "%s: tx frames are pending", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003550 ol_txrx_dump_tx_desc(txrx_pdev);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303551 return QDF_STATUS_E_TIMEOUT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003552 }
3553 timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3554 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303555 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003556}
3557
3558#ifndef QCA_WIFI_3_0_EMU
3559#define SUSPEND_DRAIN_WAIT 500
3560#else
3561#define SUSPEND_DRAIN_WAIT 3000
3562#endif
3563
Yue Ma1e11d792016-02-26 18:58:44 -08003564#ifdef FEATURE_RUNTIME_PM
3565/**
3566 * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
3567 * @txrx_pdev: TXRX pdev context
3568 *
3569 * TXRX is ready to runtime suspend if there are no pending packets
3570 * in the tx queue.
3571 *
3572 * Return: QDF_STATUS
3573 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003574static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003575{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003576 struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07003577
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003578 if (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev))
Yue Ma1e11d792016-02-26 18:58:44 -08003579 return QDF_STATUS_E_BUSY;
3580 else
3581 return QDF_STATUS_SUCCESS;
3582}
3583
3584/**
3585 * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
3586 * @txrx_pdev: TXRX pdev context
3587 *
3588 * This is a dummy function for symmetry.
3589 *
3590 * Return: QDF_STATUS_SUCCESS
3591 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003592static QDF_STATUS ol_txrx_runtime_resume(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003593{
3594 return QDF_STATUS_SUCCESS;
3595}
3596#endif
3597
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003598/**
3599 * ol_txrx_bus_suspend() - bus suspend
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003600 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003601 *
3602 * Ensure that ol_txrx is ready for bus suspend
3603 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303604 * Return: QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003605 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003606static QDF_STATUS ol_txrx_bus_suspend(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003607{
3608 return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3609}
3610
3611/**
3612 * ol_txrx_bus_resume() - bus resume
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003613 * @ppdev: TXRX pdev context
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003614 *
3615 * Dummy function for symetry
3616 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303617 * Return: QDF_STATUS_SUCCESS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003618 */
Dustin Brown7ff24dd2017-05-10 15:49:59 -07003619static QDF_STATUS ol_txrx_bus_resume(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003620{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303621 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003622}
3623
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003624/**
3625 * ol_txrx_get_tx_pending - Get the number of pending transmit
3626 * frames that are awaiting completion.
3627 *
3628 * @pdev - the data physical device object
3629 * Mainly used in clean up path to make sure all buffers have been freed
3630 *
3631 * Return: count of pending frames
3632 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003633int ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003634{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003635 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003636 uint32_t total;
3637
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303638 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3639 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3640 else
3641 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003642
Nirav Shah55b45a02016-01-21 10:00:16 +05303643 return total - ol_tx_get_total_free_desc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003644}
3645
3646void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
3647{
3648 ol_tx_desc_list tx_descs;
Yun Parkeaea8632017-04-09 09:53:45 -07003649 /*
3650 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303651 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Yun Parkeaea8632017-04-09 09:53:45 -07003652 * which is the same as the normal data send completion path
3653 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003654 htt_tx_pending_discard(pdev_handle->htt_pdev);
3655
3656 TAILQ_INIT(&tx_descs);
3657 ol_tx_queue_discard(pdev_handle, true, &tx_descs);
3658 /* Discard Frames in Discard List */
3659 ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
3660
3661 ol_tx_discard_target_frms(pdev_handle);
3662}
3663
3664/*--- debug features --------------------------------------------------------*/
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003665struct ol_txrx_stats_req_internal {
3666 struct ol_txrx_stats_req base;
3667 int serviced; /* state of this request */
3668 int offset;
3669};
3670
3671static inline
3672uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
3673{
3674 return (uint64_t) ((size_t) req);
3675}
3676
3677static inline
3678struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
3679{
3680 return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
3681}
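/*
 * Note: these two helpers implement the firmware stats "cookie" round
 * trip. ol_txrx_fw_stats_get() below allocates a non-volatile request,
 * encodes its address as a 64-bit cookie for htt_h2t_dbg_stats_get(),
 * and ol_txrx_fw_stats_handler() decodes the cookie back into the
 * request pointer when the target's stats response arrives.
 */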
3682
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003683#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003684void
3685ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
3686 uint8_t cfg_stats_type, uint32_t cfg_val)
3687{
3688 uint64_t dummy_cookie = 0;
Yun Parkeaea8632017-04-09 09:53:45 -07003689
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003690 htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
3691 0 /* reset mask */,
3692 cfg_stats_type, cfg_val, dummy_cookie);
3693}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003694#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003695
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003696static A_STATUS
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003697ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
Dhanashri Atre52f71332016-08-22 12:12:36 -07003698 bool per_vdev, bool response_expected)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003699{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003700 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003701 struct ol_txrx_pdev_t *pdev = vdev->pdev;
3702 uint64_t cookie;
3703 struct ol_txrx_stats_req_internal *non_volatile_req;
3704
3705 if (!pdev ||
3706 req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
3707 req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
3708 return A_ERROR;
3709 }
3710
3711 /*
3712 * Allocate a non-transient stats request object.
3713 * (The one provided as an argument is likely allocated on the stack.)
3714 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303715 non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003716 if (!non_volatile_req)
3717 return A_NO_MEMORY;
3718
3719 /* copy the caller's specifications */
3720 non_volatile_req->base = *req;
3721 non_volatile_req->serviced = 0;
3722 non_volatile_req->offset = 0;
3723
3724 /* use the non-volatile request object's address as the cookie */
3725 cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
3726
3727 if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
3728 req->stats_type_upload_mask,
3729 req->stats_type_reset_mask,
3730 HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
3731 cookie)) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303732 qdf_mem_free(non_volatile_req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003733 return A_ERROR;
3734 }
3735
3736 if (req->wait.blocking)
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303737 while (qdf_semaphore_acquire(req->wait.sem_ptr))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003738 ;
3739
Nirav Shahd2310422016-01-21 18:58:06 +05303740 if (response_expected == false)
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303741 qdf_mem_free(non_volatile_req);
Nirav Shahd2310422016-01-21 18:58:06 +05303742
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003743 return A_OK;
3744}
Dhanashri Atre12a08392016-02-17 13:10:34 -08003745
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003746void
3747ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
3748 uint64_t cookie, uint8_t *stats_info_list)
3749{
3750 enum htt_dbg_stats_type type;
3751 enum htt_dbg_stats_status status;
3752 int length;
3753 uint8_t *stats_data;
3754 struct ol_txrx_stats_req_internal *req;
3755 int more = 0;
3756
3757 req = ol_txrx_u64_to_stats_ptr(cookie);
3758
3759 do {
3760 htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
3761 &length, &stats_data);
3762 if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
3763 break;
3764 if (status == HTT_DBG_STATS_STATUS_PRESENT ||
3765 status == HTT_DBG_STATS_STATUS_PARTIAL) {
3766 uint8_t *buf;
3767 int bytes = 0;
3768
3769 if (status == HTT_DBG_STATS_STATUS_PARTIAL)
3770 more = 1;
3771 if (req->base.print.verbose || req->base.print.concise)
3772 /* provide the header along with the data */
3773 htt_t2h_stats_print(stats_info_list,
3774 req->base.print.concise);
3775
3776 switch (type) {
3777 case HTT_DBG_STATS_WAL_PDEV_TXRX:
3778 bytes = sizeof(struct wlan_dbg_stats);
3779 if (req->base.copy.buf) {
3780 int lmt;
3781
3782 lmt = sizeof(struct wlan_dbg_stats);
3783 if (req->base.copy.byte_limit < lmt)
3784 lmt = req->base.copy.byte_limit;
3785 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303786 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003787 }
3788 break;
3789 case HTT_DBG_STATS_RX_REORDER:
3790 bytes = sizeof(struct rx_reorder_stats);
3791 if (req->base.copy.buf) {
3792 int lmt;
3793
3794 lmt = sizeof(struct rx_reorder_stats);
3795 if (req->base.copy.byte_limit < lmt)
3796 lmt = req->base.copy.byte_limit;
3797 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303798 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003799 }
3800 break;
3801 case HTT_DBG_STATS_RX_RATE_INFO:
3802 bytes = sizeof(wlan_dbg_rx_rate_info_t);
3803 if (req->base.copy.buf) {
3804 int lmt;
3805
3806 lmt = sizeof(wlan_dbg_rx_rate_info_t);
3807 if (req->base.copy.byte_limit < lmt)
3808 lmt = req->base.copy.byte_limit;
3809 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303810 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003811 }
3812 break;
3813
3814 case HTT_DBG_STATS_TX_RATE_INFO:
3815 bytes = sizeof(wlan_dbg_tx_rate_info_t);
3816 if (req->base.copy.buf) {
3817 int lmt;
3818
3819 lmt = sizeof(wlan_dbg_tx_rate_info_t);
3820 if (req->base.copy.byte_limit < lmt)
3821 lmt = req->base.copy.byte_limit;
3822 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303823 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003824 }
3825 break;
3826
3827 case HTT_DBG_STATS_TX_PPDU_LOG:
3828 bytes = 0;
3829 /* TO DO: specify how many bytes are present */
3830 /* TO DO: add copying to the requestor's buf */
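				/* note: no break here - control falls through
				 * to the next stats type
				 */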
3831
3832 case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07003833 bytes = sizeof(struct
3834 rx_remote_buffer_mgmt_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003835 if (req->base.copy.buf) {
3836 int limit;
3837
Yun Parkeaea8632017-04-09 09:53:45 -07003838 limit = sizeof(struct
3839 rx_remote_buffer_mgmt_stats);
3840 if (req->base.copy.byte_limit < limit)
3841 limit = req->base.copy.
3842 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003843 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303844 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003845 }
3846 break;
3847
3848 case HTT_DBG_STATS_TXBF_INFO:
3849 bytes = sizeof(struct wlan_dbg_txbf_data_stats);
3850 if (req->base.copy.buf) {
3851 int limit;
3852
Yun Parkeaea8632017-04-09 09:53:45 -07003853 limit = sizeof(struct
3854 wlan_dbg_txbf_data_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003855 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003856 limit = req->base.copy.
3857 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003858 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303859 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003860 }
3861 break;
3862
3863 case HTT_DBG_STATS_SND_INFO:
3864 bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
3865 if (req->base.copy.buf) {
3866 int limit;
3867
Yun Parkeaea8632017-04-09 09:53:45 -07003868 limit = sizeof(struct
3869 wlan_dbg_txbf_snd_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003870 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003871 limit = req->base.copy.
3872 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003873 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303874 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003875 }
3876 break;
3877
3878 case HTT_DBG_STATS_TX_SELFGEN_INFO:
Yun Parkeaea8632017-04-09 09:53:45 -07003879 bytes = sizeof(struct
3880 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003881 if (req->base.copy.buf) {
3882 int limit;
3883
Yun Parkeaea8632017-04-09 09:53:45 -07003884 limit = sizeof(struct
3885 wlan_dbg_tx_selfgen_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003886 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003887 limit = req->base.copy.
3888 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003889 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303890 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003891 }
3892 break;
3893
3894 case HTT_DBG_STATS_ERROR_INFO:
3895 bytes =
3896 sizeof(struct wlan_dbg_wifi2_error_stats);
3897 if (req->base.copy.buf) {
3898 int limit;
3899
Yun Parkeaea8632017-04-09 09:53:45 -07003900 limit = sizeof(struct
3901 wlan_dbg_wifi2_error_stats);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003902 if (req->base.copy.byte_limit < limit)
Yun Parkeaea8632017-04-09 09:53:45 -07003903 limit = req->base.copy.
3904 byte_limit;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003905 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303906 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003907 }
3908 break;
3909
3910 case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
3911 bytes =
3912 sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
3913 if (req->base.copy.buf) {
3914 int limit;
3915
3916 limit = sizeof(struct
3917 rx_txbf_musu_ndpa_pkts_stats);
3918 if (req->base.copy.byte_limit < limit)
3919 limit =
3920 req->base.copy.byte_limit;
3921 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303922 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003923 }
3924 break;
3925
3926 default:
3927 break;
3928 }
Yun Parkeaea8632017-04-09 09:53:45 -07003929 buf = req->base.copy.buf ?
3930 req->base.copy.buf : stats_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003931 if (req->base.callback.fp)
3932 req->base.callback.fp(req->base.callback.ctxt,
3933 type, buf, bytes);
3934 }
3935 stats_info_list += length;
3936 } while (1);
3937
3938 if (!more) {
3939 if (req->base.wait.blocking)
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303940 qdf_semaphore_release(req->base.wait.sem_ptr);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303941 qdf_mem_free(req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003942 }
3943}
3944
3945#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
3946int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
3947{
3948 if (debug_specs & TXRX_DBG_MASK_OBJS) {
3949#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
3950 ol_txrx_pdev_display(vdev->pdev, 0);
3951#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303952 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303953 "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003954#endif
3955 }
Yun Parkeaea8632017-04-09 09:53:45 -07003956 if (debug_specs & TXRX_DBG_MASK_STATS)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003957 ol_txrx_stats_display(vdev->pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003958 if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
3959#if defined(ENABLE_TXRX_PROT_ANALYZE)
3960 ol_txrx_prot_ans_display(vdev->pdev);
3961#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303962 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303963 "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003964#endif
3965 }
3966 if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
3967#if defined(ENABLE_RX_REORDER_TRACE)
3968 ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
3969#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303970 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303971 "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003972#endif
3973
3974 }
3975 return 0;
3976}
3977#endif
3978
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003979#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003980int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
3981 int max_subfrms_ampdu, int max_subfrms_amsdu)
3982{
3983 return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
3984 max_subfrms_ampdu, max_subfrms_amsdu);
3985}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003986#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003987
3988#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
3989void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
3990{
3991 struct ol_txrx_vdev_t *vdev;
3992
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303993 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003994 "%*s%s:\n", indent, " ", "txrx pdev");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303995 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003996 "%*spdev object: %p", indent + 4, " ", pdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303997 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003998 "%*svdev list:", indent + 4, " ");
3999 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304000 ol_txrx_vdev_display(vdev, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004001 }
4002 ol_txrx_peer_find_display(pdev, indent + 4);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304003 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004004 "%*stx desc pool: %d elems @ %p", indent + 4, " ",
4005 pdev->tx_desc.pool_size, pdev->tx_desc.array);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304006 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004007 htt_display(pdev->htt_pdev, indent);
4008}
4009
4010void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
4011{
4012 struct ol_txrx_peer_t *peer;
4013
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304014 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004015 "%*stxrx vdev: %p\n", indent, " ", vdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304016 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004017 "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304018 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004019 "%*sMAC addr: %d:%d:%d:%d:%d:%d",
4020 indent + 4, " ",
4021 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
4022 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
4023 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304024 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004025 "%*speer list:", indent + 4, " ");
4026 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304027 ol_txrx_peer_display(peer, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004028 }
4029}
4030
4031void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
4032{
4033 int i;
4034
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304035 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004036 "%*stxrx peer: %p", indent, " ", peer);
4037 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
4038 if (peer->peer_ids[i] != HTT_INVALID_PEER) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304039 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004040 "%*sID: %d", indent + 4, " ",
4041 peer->peer_ids[i]);
4042 }
4043 }
4044}
4045#endif /* TXRX_DEBUG_LEVEL */
4046
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004047/**
 * ol_txrx_stats() - write ol layer tx queue stats into a buffer
4049 * @vdev_id: vdev_id
4050 * @buffer: pointer to buffer
4051 * @buf_len: length of the buffer
4052 *
4053 * Return: length of string
4054 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004055static int
Yun Parkeaea8632017-04-09 09:53:45 -07004056ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004057{
4058 uint32_t len = 0;
4059
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004060 struct ol_txrx_vdev_t *vdev =
4061 (struct ol_txrx_vdev_t *)
4062 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004063
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004064 if (!vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304065 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304066 "%s: vdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004067 snprintf(buffer, buf_len, "vdev not found");
4068 return len;
4069 }
4070
4071 len = scnprintf(buffer, buf_len,
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004072 "\n\nTXRX stats:\nllQueue State : %s\npause %u unpause %u\noverflow %u\nllQueue timer state : %s",
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304073 ((vdev->ll_pause.is_q_paused == false) ?
4074 "UNPAUSED" : "PAUSED"),
4075 vdev->ll_pause.q_pause_cnt,
4076 vdev->ll_pause.q_unpause_cnt,
4077 vdev->ll_pause.q_overflow_cnt,
4078 ((vdev->ll_pause.is_q_timer_on == false)
4079 ? "NOT-RUNNING" : "RUNNING"));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004080 return len;
4081}
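
/*
 * Illustrative sketch, kept out of the build: formatting the ll-pause queue
 * state of a vdev into a small text buffer, e.g. from a debug read handler.
 * The vdev id and buffer size are arbitrary example values.
 */
#if 0
static void example_print_vdev_queue_state(uint8_t vdev_id)
{
	char buf[256];

	if (ol_txrx_stats(vdev_id, buf, sizeof(buf)))
		qdf_print("%s\n", buf);
}
#endif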
4082
4083void ol_txrx_stats_display(ol_txrx_pdev_handle pdev)
4084{
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304085 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304086 "TX PATH Statistics:");
4087 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05304088 "sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
4089 pdev->stats.pub.tx.from_stack.pkts,
4090 pdev->stats.pub.tx.from_stack.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004091 pdev->stats.pub.tx.dropped.host_reject.pkts,
4092 pdev->stats.pub.tx.dropped.host_reject.bytes,
4093 pdev->stats.pub.tx.dropped.download_fail.pkts
4094 + pdev->stats.pub.tx.dropped.target_discard.pkts
4095 + pdev->stats.pub.tx.dropped.no_ack.pkts,
4096 pdev->stats.pub.tx.dropped.download_fail.bytes
4097 + pdev->stats.pub.tx.dropped.target_discard.bytes
4098 + pdev->stats.pub.tx.dropped.no_ack.bytes);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304099 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Yun Parkeaea8632017-04-09 09:53:45 -07004100 "successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B)",
Nirav Shahda008342016-05-17 18:50:40 +05304101 pdev->stats.pub.tx.delivered.pkts,
4102 pdev->stats.pub.tx.delivered.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004103 pdev->stats.pub.tx.dropped.download_fail.pkts,
4104 pdev->stats.pub.tx.dropped.download_fail.bytes,
4105 pdev->stats.pub.tx.dropped.target_discard.pkts,
4106 pdev->stats.pub.tx.dropped.target_discard.bytes,
4107 pdev->stats.pub.tx.dropped.no_ack.pkts,
4108 pdev->stats.pub.tx.dropped.no_ack.bytes);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304109 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05304110 "Tx completions per HTT message:\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004111 "Single Packet %d\n"
4112 " 2-10 Packets %d\n"
4113 "11-20 Packets %d\n"
4114 "21-30 Packets %d\n"
4115 "31-40 Packets %d\n"
4116 "41-50 Packets %d\n"
4117 "51-60 Packets %d\n"
4118 " 60+ Packets %d\n",
4119 pdev->stats.pub.tx.comp_histogram.pkts_1,
4120 pdev->stats.pub.tx.comp_histogram.pkts_2_10,
4121 pdev->stats.pub.tx.comp_histogram.pkts_11_20,
4122 pdev->stats.pub.tx.comp_histogram.pkts_21_30,
4123 pdev->stats.pub.tx.comp_histogram.pkts_31_40,
4124 pdev->stats.pub.tx.comp_histogram.pkts_41_50,
4125 pdev->stats.pub.tx.comp_histogram.pkts_51_60,
4126 pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
Nirav Shahda008342016-05-17 18:50:40 +05304127
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304128 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304129 "RX PATH Statistics:");
4130 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4131 "%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
Nirav Shahda008342016-05-17 18:50:40 +05304132 "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
4133 "msdus with frag_ind: %d msdus with offload_ind: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004134 pdev->stats.priv.rx.normal.ppdus,
4135 pdev->stats.priv.rx.normal.mpdus,
4136 pdev->stats.pub.rx.delivered.pkts,
4137 pdev->stats.pub.rx.delivered.bytes,
Nirav Shah6a4eee62016-04-25 10:15:04 +05304138 pdev->stats.pub.rx.dropped_err.pkts,
4139 pdev->stats.pub.rx.dropped_err.bytes,
4140 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4141 pdev->stats.pub.rx.dropped_peer_invalid.bytes,
4142 pdev->stats.pub.rx.dropped_mic_err.pkts,
Nirav Shahda008342016-05-17 18:50:40 +05304143 pdev->stats.pub.rx.dropped_mic_err.bytes,
4144 pdev->stats.pub.rx.msdus_with_frag_ind,
4145 pdev->stats.pub.rx.msdus_with_offload_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004146
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304147 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004148 " fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
4149 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4150 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4151 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304152
4153 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05304154 "Rx packets per HTT message:\n"
Nirav Shah6a4eee62016-04-25 10:15:04 +05304155 "Single Packet %d\n"
4156 " 2-10 Packets %d\n"
4157 "11-20 Packets %d\n"
4158 "21-30 Packets %d\n"
4159 "31-40 Packets %d\n"
4160 "41-50 Packets %d\n"
4161 "51-60 Packets %d\n"
4162 " 60+ Packets %d\n",
4163 pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4164 pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4165 pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4166 pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4167 pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4168 pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4169 pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4170 pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004171}
4172
4173void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4174{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304175 qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004176}
4177
4178#if defined(ENABLE_TXRX_PROT_ANALYZE)
4179
4180void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4181{
4182 ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4183 ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4184}
4185
4186#endif /* ENABLE_TXRX_PROT_ANALYZE */
4187
4188#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
4189int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4190{
4191 return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4192 OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4193}
4194#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4195
4196#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
4197A_STATUS
4198ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4199 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4200{
Anurag Chouhanc5548422016-02-24 18:33:27 +05304201 qdf_assert(pdev && peer && stats);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304202 qdf_spin_lock_bh(&pdev->peer_stat_mutex);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304203 qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304204 qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004205 return A_OK;
4206}
4207#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4208
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004209static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *pvdev, bool val)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004210{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004211 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004212 if (NULL == vdev)
4213 return;
4214
4215 vdev->disable_intrabss_fwd = val;
4216}
4217
Nirav Shahc657ef52016-07-26 14:22:38 +05304218/**
4219 * ol_txrx_update_mac_id() - update mac_id for vdev
4220 * @vdev_id: vdev id
4221 * @mac_id: mac id
4222 *
4223 * Return: none
4224 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004225static void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id)
Nirav Shahc657ef52016-07-26 14:22:38 +05304226{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004227 struct ol_txrx_vdev_t *vdev =
4228 (struct ol_txrx_vdev_t *)
4229 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Nirav Shahc657ef52016-07-26 14:22:38 +05304230
4231 if (NULL == vdev) {
4232 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4233 "%s: Invalid vdev_id %d", __func__, vdev_id);
4234 return;
4235 }
4236 vdev->mac_id = mac_id;
4237}
4238
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004239#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
4240
4241/**
4242 * ol_txrx_get_vdev_from_sta_id() - get vdev from sta_id
4243 * @sta_id: sta_id
4244 *
4245 * Return: vdev handle
4246 * NULL if not found.
4247 */
4248static ol_txrx_vdev_handle ol_txrx_get_vdev_from_sta_id(uint8_t sta_id)
4249{
4250 struct ol_txrx_peer_t *peer = NULL;
4251 ol_txrx_pdev_handle pdev = NULL;
4252
4253 if (sta_id >= WLAN_MAX_STA_COUNT) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304254 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304255 "Invalid sta id passed");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004256 return NULL;
4257 }
4258
Anurag Chouhan6d760662016-02-20 16:05:43 +05304259 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004260 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304261 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304262 "PDEV not found for sta_id [%d]", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004263 return NULL;
4264 }
4265
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004266 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004267
4268 if (!peer) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304269 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304270 "PEER [%d] not found", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004271 return NULL;
4272 }
4273
4274 return peer->vdev;
4275}
4276
4277/**
4278 * ol_txrx_register_tx_flow_control() - register tx flow control callback
4279 * @vdev_id: vdev_id
4280 * @flowControl: flow control callback
4281 * @osif_fc_ctx: callback context
4282 *
 * Return: 0 for success or error code
4284 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004285static int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304286 ol_txrx_tx_flow_control_fp flowControl,
4287 void *osif_fc_ctx)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004288{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004289 struct ol_txrx_vdev_t *vdev =
4290 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004291
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004292 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304293 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304294 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004295 return -EINVAL;
4296 }
4297
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304298 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004299 vdev->osif_flow_control_cb = flowControl;
4300 vdev->osif_fc_ctx = osif_fc_ctx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304301 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004302 return 0;
4303}
4304
4305/**
 * ol_txrx_deregister_tx_flow_control_cb() - deregister tx flow control
 * callback
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004308 * @vdev_id: vdev_id
4309 *
4310 * Return: 0 for success or error code
4311 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004312static int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004313{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004314 struct ol_txrx_vdev_t *vdev =
4315 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004316
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004317 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304318 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304319 "%s: Invalid vdev_id", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004320 return -EINVAL;
4321 }
4322
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304323 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004324 vdev->osif_flow_control_cb = NULL;
4325 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304326 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004327 return 0;
4328}
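
/*
 * Illustrative sketch, kept out of the build: registering and later removing
 * a tx flow-control callback for a vdev.  The callback prototype shown here
 * (context pointer plus a tx_resume flag) is an assumption about
 * ol_txrx_tx_flow_control_fp; only the register/deregister entry points
 * above are taken from this file.
 */
#if 0
static void example_osif_flow_control(void *osif_fc_ctx, bool tx_resume)
{
	/* wake or pause the OS transmit queues owned by osif_fc_ctx */
}

static int example_flow_control_setup(uint8_t vdev_id, void *adapter)
{
	int ret;

	ret = ol_txrx_register_tx_flow_control(vdev_id,
						example_osif_flow_control,
						adapter);
	if (ret)
		return ret;

	/* ... data transfer happens here ... */

	return ol_txrx_deregister_tx_flow_control_cb(vdev_id);
}
#endif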
4329
4330/**
 * ol_txrx_get_tx_resource() - check whether free tx descriptors are above the low watermark
4332 * @sta_id: sta id
4333 * @low_watermark: low watermark
4334 * @high_watermark_offset: high watermark offset value
4335 *
4336 * Return: true/false
4337 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004338static bool
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004339ol_txrx_get_tx_resource(uint8_t sta_id,
4340 unsigned int low_watermark,
4341 unsigned int high_watermark_offset)
4342{
4343 ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004344
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004345 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304346 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304347 "%s: Invalid sta_id %d", __func__, sta_id);
		/* Return true so the caller does not conclude that resources
		 * are below the low_watermark.
		 * sta_id validation will be done in ol_tx_send_data_frame,
		 * and if the sta_id is not registered the host will drop
		 * the packet.
		 */
4354 return true;
4355 }
4356
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304357 qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304358
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004359 if (vdev->pdev->tx_desc.num_free < (uint16_t) low_watermark) {
4360 vdev->tx_fl_lwm = (uint16_t) low_watermark;
4361 vdev->tx_fl_hwm =
4362 (uint16_t) (low_watermark + high_watermark_offset);
4363 /* Not enough free resource, stop TX OS Q */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05304364 qdf_atomic_set(&vdev->os_q_paused, 1);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304365 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004366 return false;
4367 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304368 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004369 return true;
4370}
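
/*
 * Illustrative sketch, kept out of the build: an OS tx path could poll
 * ol_txrx_get_tx_resource() before queueing a frame and back off when the
 * free tx descriptor count has dropped below its low watermark.  The
 * watermark values are arbitrary example numbers.
 */
#if 0
static bool example_can_transmit(uint8_t sta_id)
{
	/* pause when fewer than 32 descriptors remain; resume at 32 + 64 */
	return ol_txrx_get_tx_resource(sta_id, 32, 64);
}
#endif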
4371
4372/**
4373 * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
4374 * @vdev_id: vdev id
4375 * @pause_q_depth: pause queue depth
4376 *
4377 * Return: 0 for success or error code
4378 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004379static int
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004380ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
4381{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004382 struct ol_txrx_vdev_t *vdev =
4383 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Yun Parkeaea8632017-04-09 09:53:45 -07004384
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004385 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304386 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304387 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004388 return -EINVAL;
4389 }
4390
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304391 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004392 vdev->ll_pause.max_q_depth = pause_q_depth;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304393 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004394
4395 return 0;
4396}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004397#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
4398
4399#ifdef IPA_OFFLOAD
Leo Chang8e073612015-11-13 10:55:34 -08004400/**
4401 * ol_txrx_ipa_uc_get_resource() - Client request resource information
4402 * @pdev: handle to the HTT instance
4403 * @ce_sr_base_paddr: copy engine source ring base physical address
4404 * @ce_sr_ring_size: copy engine source ring size
4405 * @ce_reg_paddr: copy engine register physical address
4406 * @tx_comp_ring_base_paddr: tx comp ring base physical address
4407 * @tx_comp_ring_size: tx comp ring size
4408 * @tx_num_alloc_buffer: number of allocated tx buffer
4409 * @rx_rdy_ring_base_paddr: rx ready ring base physical address
4410 * @rx_rdy_ring_size: rx ready ring size
4411 * @rx_proc_done_idx_paddr: rx process done index physical address
4412 * @rx_proc_done_idx_vaddr: rx process done index virtual address
4413 * @rx2_rdy_ring_base_paddr: rx done ring base physical address
4414 * @rx2_rdy_ring_size: rx done ring size
4415 * @rx2_proc_done_idx_paddr: rx done index physical address
4416 * @rx2_proc_done_idx_vaddr: rx done index virtual address
4417 *
 * OL client will request IPA UC related resource information.
 * Resource information will be distributed to the IPA module.
4420 * All of the required resources should be pre-allocated
4421 *
4422 * Return: none
4423 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004424static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004425ol_txrx_ipa_uc_get_resource(struct cdp_pdev *ppdev,
Leo Chang98726762016-10-28 11:07:18 -07004426 struct ol_txrx_ipa_resources *ipa_res)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004427{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004428 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07004429
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004430 htt_ipa_uc_get_resource(pdev->htt_pdev,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004431 &ipa_res->ce_sr_base_paddr,
4432 &ipa_res->ce_sr_ring_size,
4433 &ipa_res->ce_reg_paddr,
4434 &ipa_res->tx_comp_ring_base_paddr,
4435 &ipa_res->tx_comp_ring_size,
4436 &ipa_res->tx_num_alloc_buffer,
4437 &ipa_res->rx_rdy_ring_base_paddr,
4438 &ipa_res->rx_rdy_ring_size,
4439 &ipa_res->rx_proc_done_idx_paddr,
4440 &ipa_res->rx_proc_done_idx_vaddr,
4441 &ipa_res->rx2_rdy_ring_base_paddr,
4442 &ipa_res->rx2_rdy_ring_size,
4443 &ipa_res->rx2_proc_done_idx_paddr,
4444 &ipa_res->rx2_proc_done_idx_vaddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004445}
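
/*
 * Illustrative sketch, kept out of the build: how an IPA offload client
 * might query the pre-allocated datapath resources and hand the copy-engine
 * ring geometry to the IPA driver.  The consuming function is hypothetical;
 * the ol_txrx_ipa_resources fields are the ones filled in above.
 */
#if 0
static void example_ipa_resource_query(struct cdp_pdev *ppdev)
{
	struct ol_txrx_ipa_resources ipa_res = { 0 };

	ol_txrx_ipa_uc_get_resource(ppdev, &ipa_res);

	/* e.g. pass ipa_res.ce_sr_base_paddr / ce_sr_ring_size and
	 * ipa_res.tx_comp_ring_base_paddr / tx_comp_ring_size to the
	 * IPA driver's setup routine (not shown here).
	 */
}
#endif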
4446
Leo Chang8e073612015-11-13 10:55:34 -08004447/**
4448 * ol_txrx_ipa_uc_set_doorbell_paddr() - Client set IPA UC doorbell register
4449 * @pdev: handle to the HTT instance
 * @ipa_tx_uc_doorbell_paddr: tx comp doorbell physical address
 * @ipa_rx_uc_doorbell_paddr: rx ready doorbell physical address
 *
 * IPA UC lets the driver know the doorbell register physical addresses.
 * WLAN firmware will use these physical addresses to notify IPA UC.
4455 *
4456 * Return: none
4457 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004458static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004459ol_txrx_ipa_uc_set_doorbell_paddr(struct cdp_pdev *ppdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05304460 qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
4461 qdf_dma_addr_t ipa_rx_uc_doorbell_paddr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004462{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004463 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004464 htt_ipa_uc_set_doorbell_paddr(pdev->htt_pdev,
4465 ipa_tx_uc_doorbell_paddr,
4466 ipa_rx_uc_doorbell_paddr);
4467}
4468
Leo Chang8e073612015-11-13 10:55:34 -08004469/**
4470 * ol_txrx_ipa_uc_set_active() - Client notify IPA UC data path active or not
4471 * @pdev: handle to the HTT instance
 * @uc_active: whether the IPA UC data path is active
 * @is_tx: true for the tx path, false for the rx path
 *
 * Notify the WLAN firmware whether the IPA UC data path is active
 * for the given direction.
4477 *
4478 * Return: none
4479 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004480static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004481ol_txrx_ipa_uc_set_active(struct cdp_pdev *ppdev, bool uc_active, bool is_tx)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004482{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004483 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004484 htt_h2t_ipa_uc_set_active(pdev->htt_pdev, uc_active, is_tx);
4485}
4486
4487/**
Leo Chang8e073612015-11-13 10:55:34 -08004488 * ol_txrx_ipa_uc_op_response() - Handle OP command response from firmware
4489 * @pdev: handle to the HTT instance
4490 * @op_msg: op response message from firmware
4491 *
4492 * Return: none
4493 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004494static void ol_txrx_ipa_uc_op_response(struct cdp_pdev *ppdev, uint8_t *op_msg)
Govind Singh66615292015-12-28 23:07:54 +05304495{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004496 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Govind Singh66615292015-12-28 23:07:54 +05304497 if (pdev->ipa_uc_op_cb) {
4498 pdev->ipa_uc_op_cb(op_msg, pdev->osif_dev);
4499 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304500 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Govind Singh66615292015-12-28 23:07:54 +05304501 "%s: IPA callback function is not registered", __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304502 qdf_mem_free(op_msg);
Govind Singh66615292015-12-28 23:07:54 +05304503 return;
4504 }
4505}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004506
Leo Chang8e073612015-11-13 10:55:34 -08004507/**
4508 * ol_txrx_ipa_uc_register_op_cb() - Register OP handler function
4509 * @pdev: handle to the HTT instance
4510 * @op_cb: handler function pointer
4511 * @osif_dev: register client context
4512 *
4513 * Return: none
4514 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004515static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004516ol_txrx_ipa_uc_register_op_cb(struct cdp_pdev *ppdev,
4517 ipa_uc_op_cb_type op_cb, void *osif_dev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004518{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004519 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004520 pdev->ipa_uc_op_cb = op_cb;
4521 pdev->osif_dev = osif_dev;
4522}
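
/*
 * Illustrative sketch, kept out of the build: registering a handler for the
 * firmware's IPA UC OP responses.  The handler prototype mirrors the call
 * made in ol_txrx_ipa_uc_op_response() above (op_msg, osif_dev); the exact
 * ipa_uc_op_cb_type typedef is assumed to match it.  Since the unregistered
 * path above frees op_msg itself, the handler is assumed to take ownership
 * of the message buffer.
 */
#if 0
static void example_ipa_uc_op_handler(uint8_t *op_msg, void *osif_dev)
{
	/* decode the OP response here, then release the message buffer */
	qdf_mem_free(op_msg);
}

static void example_ipa_uc_op_setup(struct cdp_pdev *ppdev, void *osif_dev)
{
	ol_txrx_ipa_uc_register_op_cb(ppdev, example_ipa_uc_op_handler,
				      osif_dev);
}
#endif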
4523
Leo Chang8e073612015-11-13 10:55:34 -08004524/**
4525 * ol_txrx_ipa_uc_get_stat() - Get firmware wdi status
4526 * @pdev: handle to the HTT instance
4527 *
4528 * Return: none
4529 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004530static void ol_txrx_ipa_uc_get_stat(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004531{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004532 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004533 htt_h2t_ipa_uc_get_stats(pdev->htt_pdev);
4534}
Yun Park637d6482016-10-05 10:51:33 -07004535
4536static void ol_txrx_ipa_uc_get_share_stats(struct cdp_pdev *ppdev,
4537 uint8_t reset_stats)
4538{
4539 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
4540 htt_h2t_ipa_uc_get_share_stats(pdev->htt_pdev, reset_stats);
4541}
4542
4543static void ol_txrx_ipa_uc_set_quota(struct cdp_pdev *ppdev,
4544 uint64_t quota_bytes)
4545{
4546 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
4547 htt_h2t_ipa_uc_set_quota(pdev->htt_pdev, quota_bytes);
4548}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004549#endif /* IPA_UC_OFFLOAD */
4550
/**
 * ol_txrx_display_stats() - display OL TXRX stats
 * @soc: datapath soc handle
 * @value: module id for which stats need to be displayed
Nirav Shahda008342016-05-17 18:50:40 +05304554 *
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004555 * Return: status
Nirav Shahda008342016-05-17 18:50:40 +05304556 */
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004557static QDF_STATUS ol_txrx_display_stats(void *soc, uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004558{
4559 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004560 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004561
Anurag Chouhan6d760662016-02-20 16:05:43 +05304562 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004563 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304564 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304565 "%s: pdev is NULL", __func__);
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004566 return QDF_STATUS_E_NULL_VALUE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004567 }
4568
4569 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004570 case CDP_TXRX_PATH_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004571 ol_txrx_stats_display(pdev);
4572 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004573 case CDP_TXRX_TSO_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004574 ol_txrx_stats_display_tso(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004575 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004576 case CDP_DUMP_TX_FLOW_POOL_INFO:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004577 ol_tx_dump_flow_pool_info();
4578 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004579 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304580 qdf_nbuf_tx_desc_count_display();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004581 break;
Orhan K AKYILDIZfdd74de2016-12-15 12:08:04 -08004582 case CDP_WLAN_RX_BUF_DEBUG_STATS:
4583 htt_display_rx_buf_debug(pdev->htt_pdev);
4584 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304585#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004586 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304587 ol_tx_sched_cur_state_display(pdev);
4588 ol_tx_sched_stats_display(pdev);
4589 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004590 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304591 ol_tx_queue_log_display(pdev);
4592 break;
4593#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004594 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304595 ol_tx_dump_group_credit_stats(pdev);
4596 break;
4597#endif
4598
4599#ifdef DEBUG_HL_LOGGING
	case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304601 htt_dump_bundle_stats(pdev->htt_pdev);
4602 break;
4603#endif
4604#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004605 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004606 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004607 break;
4608 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004609 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004610}
4611
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004612/**
4613 * ol_txrx_clear_stats() - Clear OL TXRX stats
4614 * @value: Module id for which stats needs to be cleared
4615 *
4616 * Return: None
4617 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004618static void ol_txrx_clear_stats(uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004619{
4620 ol_txrx_pdev_handle pdev;
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004621 QDF_STATUS status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004622
Anurag Chouhan6d760662016-02-20 16:05:43 +05304623 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004624 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304625 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304626 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004627 return;
4628 }
4629
4630 switch (value) {
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004631 case CDP_TXRX_PATH_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004632 ol_txrx_stats_clear(pdev);
4633 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004634 case CDP_DUMP_TX_FLOW_POOL_INFO:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004635 ol_tx_clear_flow_pool_stats();
4636 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004637 case CDP_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304638 qdf_nbuf_tx_desc_count_clear();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004639 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304640#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004641 case CDP_SCHEDULER_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304642 ol_tx_sched_stats_clear(pdev);
4643 break;
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004644 case CDP_TX_QUEUE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304645 ol_tx_queue_log_clear(pdev);
4646 break;
4647#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004648 case CDP_CREDIT_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304649 ol_tx_clear_group_credit_stats(pdev);
4650 break;
4651#endif
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07004652 case CDP_BUNDLE_STATS:
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304653 htt_clear_bundle_stats(pdev->htt_pdev);
4654 break;
4655#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004656 default:
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004657 status = QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004658 break;
4659 }
Mohit Khanna3e2115b2016-10-11 13:18:29 -07004660
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004661}
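
/*
 * Illustrative sketch, kept out of the build: dumping and then clearing the
 * basic tx/rx path counters through the CDP stats ids handled above.  The
 * soc argument is unused by this implementation, so NULL is passed purely
 * for illustration.
 */
#if 0
static void example_dump_and_reset_txrx_stats(void)
{
	if (ol_txrx_display_stats(NULL, CDP_TXRX_PATH_STATS) ==
	    QDF_STATUS_SUCCESS)
		ol_txrx_clear_stats(CDP_TXRX_PATH_STATS);
}
#endif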
4662
4663/**
4664 * ol_rx_data_cb() - data rx callback
 * @pdev: txrx pdev handle
4666 * @buf_list: buffer list
Nirav Shah36a87bf2016-02-22 12:38:46 +05304667 * @staid: Station id
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004668 *
4669 * Return: None
4670 */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304671static void ol_rx_data_cb(struct ol_txrx_pdev_t *pdev,
4672 qdf_nbuf_t buf_list, uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004673{
Nirav Shah36a87bf2016-02-22 12:38:46 +05304674 void *cds_ctx = cds_get_global_context();
Mohit Khanna0696eef2016-04-14 16:14:08 -07004675 void *osif_dev;
Nirav Shahcbc6d722016-03-01 16:24:53 +05304676 qdf_nbuf_t buf, next_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304677 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004678 ol_txrx_rx_fp data_rx = NULL;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304679 struct ol_txrx_peer_t *peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004680
Nirav Shah36a87bf2016-02-22 12:38:46 +05304681 if (qdf_unlikely(!cds_ctx) || qdf_unlikely(!pdev))
4682 goto free_buf;
4683
4684 /* Do not use peer directly. Derive peer from staid to
4685 * make sure that peer is valid.
4686 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004687 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, staid);
Nirav Shah36a87bf2016-02-22 12:38:46 +05304688 if (!peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004689 goto free_buf;
4690
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304691 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre50141c52016-04-07 13:15:29 -07004692 if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
4693 !peer->vdev->rx)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304694 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004695 goto free_buf;
4696 }
Dhanashri Atre182b0272016-02-17 15:35:07 -08004697
4698 data_rx = peer->vdev->rx;
Mohit Khanna0696eef2016-04-14 16:14:08 -07004699 osif_dev = peer->vdev->osif_dev;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304700 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004701
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304702 qdf_spin_lock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004703 if (!list_empty(&peer->cached_bufq)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304704 qdf_spin_unlock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004705 /* Flush the cached frames to HDD before passing new rx frame */
4706 ol_txrx_flush_rx_frames(peer, 0);
4707 } else
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304708 qdf_spin_unlock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004709
4710 buf = buf_list;
4711 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304712 next_buf = qdf_nbuf_queue_next(buf);
4713 qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
Mohit Khanna0696eef2016-04-14 16:14:08 -07004714 ret = data_rx(osif_dev, buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304715 if (ret != QDF_STATUS_SUCCESS) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304716 ol_txrx_err("Frame Rx to HDD failed");
Nirav Shah6a4eee62016-04-25 10:15:04 +05304717 if (pdev)
4718 TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304719 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004720 }
4721 buf = next_buf;
4722 }
4723 return;
4724
4725free_buf:
Poddar, Siddarth14521792017-03-14 21:19:42 +05304726 ol_txrx_warn("%s:Dropping frames", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004727 buf = buf_list;
4728 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304729 next_buf = qdf_nbuf_queue_next(buf);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304730 if (pdev)
4731 TXRX_STATS_MSDU_INCR(pdev,
4732 rx.dropped_peer_invalid, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304733 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004734 buf = next_buf;
4735 }
4736}
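
/*
 * Illustrative sketch, kept out of the build: the shape of the per-vdev rx
 * delivery callback invoked as data_rx(osif_dev, buf) above.  The
 * ol_txrx_rx_fp typedef is assumed to match this (osif device, nbuf)
 * signature; this stub only consumes the frame, whereas a real handler
 * would pass it up to the network stack.
 */
#if 0
static QDF_STATUS example_vdev_rx_deliver(void *osif_dev, qdf_nbuf_t buf)
{
	/* a real handler hands 'buf' (already NULL-terminated by the
	 * caller) to the network stack owned by 'osif_dev'
	 */
	qdf_nbuf_free(buf);
	return QDF_STATUS_SUCCESS;
}
#endif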
4737
4738/**
4739 * ol_rx_data_process() - process rx frame
4740 * @peer: peer
4741 * @rx_buf_list: rx buffer list
4742 *
4743 * Return: None
4744 */
4745void ol_rx_data_process(struct ol_txrx_peer_t *peer,
Nirav Shahcbc6d722016-03-01 16:24:53 +05304746 qdf_nbuf_t rx_buf_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004747{
	/*
	 * Firmware data path active response will use the shim RX thread:
	 * the T2H message runs in SIRQ context, and IPA kernel module APIs
	 * must not be called from SIRQ context.
	 */
Nirav Shahcbc6d722016-03-01 16:24:53 +05304753 qdf_nbuf_t buf, next_buf;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004754 ol_txrx_rx_fp data_rx = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304755 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004756
4757 if ((!peer) || (!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304758 ol_txrx_err("peer/pdev is NULL");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004759 goto drop_rx_buf;
4760 }
4761
Dhanashri Atre182b0272016-02-17 15:35:07 -08004762 qdf_assert(peer->vdev);
4763
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304764 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004765 if (peer->state >= OL_TXRX_PEER_STATE_CONN)
Dhanashri Atre182b0272016-02-17 15:35:07 -08004766 data_rx = peer->vdev->rx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304767 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004768
4769 /*
4770 * If there is a data frame from peer before the peer is
4771 * registered for data service, enqueue them on to pending queue
4772 * which will be flushed to HDD once that station is registered.
4773 */
4774 if (!data_rx) {
4775 struct ol_rx_cached_buf *cache_buf;
Manjunathappa Prakash92db7712016-05-27 00:19:34 -07004776
Kapil Gupta53d9b572017-06-28 17:53:25 +05304777 ol_txrx_info_high(
Manjunathappa Prakash92db7712016-05-27 00:19:34 -07004778 "Data on the peer before it is registered!!!");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004779 buf = rx_buf_list;
4780 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304781 next_buf = qdf_nbuf_queue_next(buf);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304782 cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004783 if (!cache_buf) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304784 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004785 "Failed to allocate buf to cache the rx frames");
Nirav Shahcbc6d722016-03-01 16:24:53 +05304786 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004787 } else {
4788 /* Add NULL terminator */
Nirav Shahcbc6d722016-03-01 16:24:53 +05304789 qdf_nbuf_set_next(buf, NULL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004790 cache_buf->buf = buf;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304791 qdf_spin_lock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004792 list_add_tail(&cache_buf->list,
4793 &peer->cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304794 qdf_spin_unlock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004795 }
4796 buf = next_buf;
4797 }
4798 } else {
4799#ifdef QCA_CONFIG_SMP
4800 /*
4801 * If the kernel is SMP, schedule rx thread to
4802 * better use multicores.
4803 */
4804 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
Nirav Shah36a87bf2016-02-22 12:38:46 +05304805 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004806 } else {
4807 p_cds_sched_context sched_ctx =
4808 get_cds_sched_ctxt();
4809 struct cds_ol_rx_pkt *pkt;
4810
4811 if (unlikely(!sched_ctx))
4812 goto drop_rx_buf;
4813
4814 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
4815 if (!pkt) {
Kapil Gupta53d9b572017-06-28 17:53:25 +05304816 ol_txrx_info_high(
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304817 "No available Rx message buffer");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004818 goto drop_rx_buf;
4819 }
4820 pkt->callback = (cds_ol_rx_thread_cb)
4821 ol_rx_data_cb;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304822 pkt->context = (void *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004823 pkt->Rxpkt = (void *)rx_buf_list;
4824 pkt->staId = peer->local_id;
4825 cds_indicate_rxpkt(sched_ctx, pkt);
4826 }
4827#else /* QCA_CONFIG_SMP */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304828 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004829#endif /* QCA_CONFIG_SMP */
4830 }
4831
4832 return;
4833
4834drop_rx_buf:
Kapil Gupta53d9b572017-06-28 17:53:25 +05304835 ol_txrx_info_high("Dropping rx packets");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004836 buf = rx_buf_list;
4837 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304838 next_buf = qdf_nbuf_queue_next(buf);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304839 if (pdev)
4840 TXRX_STATS_MSDU_INCR(pdev,
4841 rx.dropped_peer_invalid, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304842 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004843 buf = next_buf;
4844 }
4845}
4846
4847/**
4848 * ol_txrx_register_peer() - register peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004849 * @sta_desc: sta descriptor
4850 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05304851 * Return: QDF Status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004852 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004853static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004854{
4855 struct ol_txrx_peer_t *peer;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304856 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004857 union ol_txrx_peer_update_param_t param;
4858 struct privacy_exemption privacy_filter;
4859
4860 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304861 ol_txrx_err("Pdev is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304862 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004863 }
4864
4865 if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304866 ol_txrx_err("Invalid sta id :%d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004867 sta_desc->sta_id);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304868 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004869 }
4870
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004871 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
4872 sta_desc->sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004873 if (!peer)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304874 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004875
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304876 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004877 peer->state = OL_TXRX_PEER_STATE_CONN;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304878 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004879
4880 param.qos_capable = sta_desc->is_qos_enabled;
4881 ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
4882 ol_txrx_peer_update_qos_capable);
4883
4884 if (sta_desc->is_wapi_supported) {
4885 /*Privacy filter to accept unencrypted WAI frames */
4886 privacy_filter.ether_type = ETHERTYPE_WAI;
4887 privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
4888 privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
4889 ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
4890 }
4891
4892 ol_txrx_flush_rx_frames(peer, 0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304893 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004894}
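
/*
 * Illustrative sketch, kept out of the build: how an upper layer might
 * register a newly associated station for the data path.  The sta_id value
 * comes from the caller; the ol_txrx_desc_type fields shown are the ones
 * consumed by ol_txrx_register_peer() above.
 */
#if 0
static QDF_STATUS example_register_station(uint8_t sta_id, bool qos, bool wapi)
{
	struct ol_txrx_desc_type sta_desc = { 0 };

	sta_desc.sta_id = sta_id;
	sta_desc.is_qos_enabled = qos;
	sta_desc.is_wapi_supported = wapi;

	return ol_txrx_register_peer(&sta_desc);
}
#endif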
4895
4896/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004897 * ol_txrx_register_ocb_peer - Function to register the OCB peer
4898 * @cds_ctx: Pointer to the global OS context
4899 * @mac_addr: MAC address of the self peer
4900 * @peer_id: Pointer to the peer ID
4901 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304902 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004903 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004904static QDF_STATUS ol_txrx_register_ocb_peer(void *cds_ctx, uint8_t *mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004905 uint8_t *peer_id)
4906{
4907 ol_txrx_pdev_handle pdev;
4908 ol_txrx_peer_handle peer;
4909
4910 if (!cds_ctx) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304911 ol_txrx_err("%s: Invalid context",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004912 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304913 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004914 }
4915
Anurag Chouhan6d760662016-02-20 16:05:43 +05304916 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004917 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304918 ol_txrx_err("%s: Unable to find pdev!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004919 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304920 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004921 }
4922
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004923 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
4924 mac_addr, peer_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004925 if (!peer) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304926 ol_txrx_err("%s: Unable to find OCB peer!",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004927 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304928 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004929 }
4930
4931 ol_txrx_set_ocb_peer(pdev, peer);
4932
4933 /* Set peer state to connected */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004934 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004935 OL_TXRX_PEER_STATE_AUTH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004936
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304937 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004938}
4939
4940/**
4941 * ol_txrx_set_ocb_peer - Function to store the OCB peer
4942 * @pdev: Handle to the HTT instance
4943 * @peer: Pointer to the peer
4944 */
4945void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
4946 struct ol_txrx_peer_t *peer)
4947{
4948 if (pdev == NULL)
4949 return;
4950
4951 pdev->ocb_peer = peer;
4952 pdev->ocb_peer_valid = (NULL != peer);
4953}
4954
4955/**
4956 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
4957 * @pdev: Handle to the HTT instance
4958 * @peer: Pointer to the returned peer
4959 *
4960 * Return: true if the peer is valid, false if not
4961 */
4962bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
4963 struct ol_txrx_peer_t **peer)
4964{
4965	bool rc;
4966
4967 if ((pdev == NULL) || (peer == NULL)) {
4968 rc = false;
4969 goto exit;
4970 }
4971
4972 if (pdev->ocb_peer_valid) {
4973 *peer = pdev->ocb_peer;
4974 rc = true;
4975 } else {
4976 rc = false;
4977 }
4978
4979exit:
4980 return rc;
4981}
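
/*
 * Illustrative usage sketch (not part of the original driver; the caller and
 * variable names are hypothetical): the OCB self peer is cached once with
 * ol_txrx_set_ocb_peer() at registration time and later looked up in the
 * data path with ol_txrx_get_ocb_peer():
 *
 *	struct ol_txrx_peer_t *ocb_peer = NULL;
 *
 *	if (ol_txrx_get_ocb_peer(pdev, &ocb_peer)) {
 *		// ocb_peer is valid; use it for the OCB tx/rx path
 *	} else {
 *		// no OCB peer has been registered yet
 *	}
 */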
4982
4983#ifdef QCA_LL_TX_FLOW_CONTROL_V2
4984/**
4985 * ol_txrx_register_pause_cb() - register pause callback
4986 * @pause_cb: pause callback
4987 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05304988 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004989 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004990static QDF_STATUS ol_txrx_register_pause_cb(ol_tx_pause_callback_fp pause_cb)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004991{
Anurag Chouhan6d760662016-02-20 16:05:43 +05304992 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Yun Parkeaea8632017-04-09 09:53:45 -07004993
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004994 if (!pdev || !pause_cb) {
Poddar, Siddarth14521792017-03-14 21:19:42 +05304995 ol_txrx_err("pdev or pause_cb is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304996 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004997 }
4998 pdev->pause_cb = pause_cb;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304999 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005000}
5001#endif
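
/*
 * Illustrative sketch (an assumption about the caller, not taken from this
 * file): with QCA_LL_TX_FLOW_CONTROL_V2 enabled, the OS interface layer is
 * expected to register its pause handler once the txrx pdev exists.  The
 * handler name below is hypothetical; it must match the
 * ol_tx_pause_callback_fp type declared in the txrx headers:
 *
 *	if (ol_txrx_register_pause_cb(my_tx_pause_handler) !=
 *	    QDF_STATUS_SUCCESS)
 *		// flow control pause events will not reach the OS layer
 */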
5002
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305003void
5004ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
5005{
5006 qdf_print("%s: Pkt: VA 0x%p PA 0x%llx len %d\n", __func__,
5007 qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
5008 print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
5009 qdf_nbuf_data(nbuf), len, true);
5010}
5011
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305012#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5013bool
5014ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
5015{
5016	struct ol_tx_flow_pool_t *pool;
5017 bool enough_desc_flag;
5018
5019 if (!vdev)
Yun Parkff5da562017-01-18 14:44:20 -08005020 return false;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305021
5022 pool = vdev->pool;
5023
Yun Parkff5da562017-01-18 14:44:20 -08005024 if (!pool)
5025 return false;
5026
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305027 qdf_spin_lock_bh(&pool->flow_pool_lock);
5028 enough_desc_flag = (pool->avail_desc < (pool->stop_th +
Yun Parkff5da562017-01-18 14:44:20 -08005029 OL_TX_NON_FWD_RESERVE))
5030 ? false : true;
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305031 qdf_spin_unlock_bh(&pool->flow_pool_lock);
5032 return enough_desc_flag;
5033}
5034#else
5035bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
5036{
5037 return true;
5038}
5039#endif
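
/*
 * Illustrative sketch (hypothetical caller): the intra-BSS rx forwarding
 * path is expected to consult this check before re-injecting a received
 * frame into the tx path, so that forwarded traffic cannot consume the
 * last OL_TX_NON_FWD_RESERVE descriptors above the pool's stop threshold:
 *
 *	if (ol_txrx_fwd_desc_thresh_check(vdev))
 *		// enough tx descriptors remain; forward the frame
 *	else
 *		// pool is close to its stop threshold; drop the frame
 */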
5040
Dhanashri Atre12a08392016-02-17 13:10:34 -08005041/**
5042 * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
5043 * @vdev_id: vdev_id
5044 *
5045 * Return: vdev handle
5046 * NULL if not found.
5047 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005048struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
Dhanashri Atre12a08392016-02-17 13:10:34 -08005049{
5050 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5051 ol_txrx_vdev_handle vdev = NULL;
5052
5053 if (qdf_unlikely(!pdev))
5054 return NULL;
5055
5056 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5057 if (vdev->vdev_id == vdev_id)
5058 break;
5059 }
5060
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005061 return (struct cdp_vdev *)vdev;
Dhanashri Atre12a08392016-02-17 13:10:34 -08005062}
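
/*
 * Illustrative usage sketch (hypothetical caller): components that only
 * carry the numeric vdev_id can translate it back into a vdev handle, e.g.:
 *
 *	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)
 *				ol_txrx_get_vdev_from_vdev_id(vdev_id);
 *
 *	if (!vdev)
 *		// the interface has already been deleted
 */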
Nirav Shah2e583a02016-04-30 14:06:12 +05305063
5064/**
5065 * ol_txrx_set_wisa_mode() - set wisa mode
5066 * @pvdev: vdev handle
5067 * @enable: enable flag
5068 *
5069 * Return: QDF STATUS
5070 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005071static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_vdev *pvdev, bool enable)
Nirav Shah2e583a02016-04-30 14:06:12 +05305072{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005073 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Leo Chang98726762016-10-28 11:07:18 -07005074
Nirav Shah2e583a02016-04-30 14:06:12 +05305075 if (!vdev)
5076 return QDF_STATUS_E_INVAL;
5077
5078 vdev->is_wisa_mode_enable = enable;
5079 return QDF_STATUS_SUCCESS;
5080}
Leo Chang98726762016-10-28 11:07:18 -07005081
5082/**
5083 * ol_txrx_get_vdev_id() - get interface id from interface context
5084 * @pvdev: vdev handle
5085 *
5086 * Return: virtual interface id
5087 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005088static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
Leo Chang98726762016-10-28 11:07:18 -07005089{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005090 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Leo Chang98726762016-10-28 11:07:18 -07005091 return vdev->vdev_id;
5092}
5093
5094/**
5095 * ol_txrx_last_assoc_received() - get time of last assoc received
5096 * @ppeer: peer handle
5097 *
5098 * Return: pointer to the time of last assoc received
5099 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005100static qdf_time_t *ol_txrx_last_assoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005101{
5102 ol_txrx_peer_handle peer = ppeer;
5103
5104 return &peer->last_assoc_rcvd;
5105}
5106
5107/**
5108 * ol_txrx_last_disassoc_received() - get time of last disassoc received
5109 * @ppeer: peer handle
5110 *
5111 * Return: pointer to the time of last disassoc received
5112 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005113static qdf_time_t *ol_txrx_last_disassoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005114{
5115 ol_txrx_peer_handle peer = ppeer;
5116
5117 return &peer->last_disassoc_rcvd;
5118}
5119
5120/**
5121 * ol_txrx_last_deauth_received() - get time of last deauth received
5122 * @ppeer: peer handle
5123 *
5124 * Return: pointer to the time of last deauth received
5125 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005126static qdf_time_t *ol_txrx_last_deauth_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005127{
5128 ol_txrx_peer_handle peer = ppeer;
5129
5130 return &peer->last_deauth_rcvd;
5131}
5132
5133/**
5134 * ol_txrx_soc_attach_target() - attach soc target
5135 * @soc: soc handle
5136 *
5137 * MCL legacy OL does nothing here
5138 *
5139 * Return: 0
5140 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005141static int ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
Leo Chang98726762016-10-28 11:07:18 -07005142{
5143	/* MCL legacy OL does nothing here */
5144 return 0;
5145}
5146
5147/**
5148 * ol_txrx_soc_detach() - detach soc target
5149 * @soc: soc handle
5150 *
5151 * Frees the SoC handle allocated by ol_txrx_soc_attach()
5152 *
5153 * Return: none
5154 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005155static void ol_txrx_soc_detach(void *soc)
Leo Chang98726762016-10-28 11:07:18 -07005156{
Venkata Sharath Chandra Manchala0c2eece2017-03-09 17:30:52 -08005157 qdf_mem_free(soc);
Leo Chang98726762016-10-28 11:07:18 -07005158}
5159
5160/**
5161 * ol_txrx_pkt_log_con_service() - connect packet log service
5162 * @ppdev: physical device handle
5163 * @scn: device context
5164 *
5165 * Return: none
5166 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005167static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
Leo Chang98726762016-10-28 11:07:18 -07005168{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005169 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07005170
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005171 htt_pkt_log_init((struct cdp_pdev *)pdev, scn);
Leo Chang98726762016-10-28 11:07:18 -07005172 pktlog_htc_attach();
5173}
5174
5175/* OL wrapper functions for CDP abstraction */
5176/**
5177 * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
5178 * @peer: peer handle
5179 * @drop: rx packets drop or deliver
5180 *
5181 * Return: none
5182 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005183static void ol_txrx_wrapper_flush_rx_frames(void *peer, bool drop)
Leo Chang98726762016-10-28 11:07:18 -07005184{
5185 ol_txrx_flush_rx_frames((ol_txrx_peer_handle)peer, drop);
5186}
5187
5188/**
5189 * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
5190 * @ppdev: pdev handle
5191 * @vdev_id: interface id
5192 *
5193 * Return: virtual interface instance
5194 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005195static
5196struct cdp_vdev *ol_txrx_wrapper_get_vdev_from_vdev_id(struct cdp_pdev *ppdev,
5197 uint8_t vdev_id)
Leo Chang98726762016-10-28 11:07:18 -07005198{
5199 return ol_txrx_get_vdev_from_vdev_id(vdev_id);
5200}
5201
5202/**
5203 * ol_txrx_wrapper_register_peer() - register peer
5204 * @pdev: pdev handle
5205 * @sta_desc: peer description
5206 *
5207 * Return: QDF STATUS
5208 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005209static QDF_STATUS ol_txrx_wrapper_register_peer(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005210 struct ol_txrx_desc_type *sta_desc)
5211{
5212 return ol_txrx_register_peer(sta_desc);
5213}
5214
5215/**
5216 * ol_txrx_wrapper_peer_find_by_local_id() - Find a txrx peer handle
5217 * @pdev: the data physical device object
5218 * @local_peer_id: the ID txrx assigned locally to the peer in question
5219 *
5220 * The control SW typically uses the txrx peer handle to refer to the peer.
5221 * In unusual circumstances, if it is infeasible for the control SW to
5222 * maintain the txrx peer handle but it can maintain a small integer local
5223 * peer ID, this function allows the peer handle to be retrieved, based on
5224 * the local peer ID.
5225 *
5226 * Return: handle to the txrx peer object
5227 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005228static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005229ol_txrx_wrapper_peer_find_by_local_id(struct cdp_pdev *pdev,
5230 uint8_t local_peer_id)
Leo Chang98726762016-10-28 11:07:18 -07005231{
5232 return (void *)ol_txrx_peer_find_by_local_id(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005233 pdev, local_peer_id);
Leo Chang98726762016-10-28 11:07:18 -07005234}
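
/*
 * Illustrative usage sketch (hypothetical caller and variable names):
 * callers outside this file reach the lookup through the registered
 * .peer_find_by_local_id CDP op; the effect is equivalent to:
 *
 *	struct ol_txrx_peer_t *peer;
 *
 *	peer = ol_txrx_wrapper_peer_find_by_local_id(pdev, local_peer_id);
 *	if (!peer)
 *		// the local peer ID no longer maps to a live peer
 */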
5235
5236/**
5237 * ol_txrx_wrapper_cfg_is_high_latency() - check if the bus is high or low latency
5238 * @cfg_pdev: cfg pdev handle
5239 *
5240 * Return: 1 high latency bus
5241 * 0 low latency bus
5242 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005243static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
Leo Chang98726762016-10-28 11:07:18 -07005244{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005245 return ol_cfg_is_high_latency(cfg_pdev);
Leo Chang98726762016-10-28 11:07:18 -07005246}
5247
5248/**
5249 * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
5250 * @pdev: pdev handle
5251 * @peer_mac: mac address of the peer whose state has changed
 * @state: the new state of the peer
5252 *
5253 * Specify the peer's authentication state (none, connected, authenticated)
5254 * to allow the data SW to determine whether to filter out invalid data frames.
5255 * (In the "connected" state, where security is enabled, but authentication
5256 * has not completed, tx and rx data frames other than EAPOL or WAPI should
5257 * be discarded.)
5258 * This function is only relevant for systems in which the tx and rx filtering
5259 * are done in the host rather than in the target.
5260 *
5261 * Return: QDF Status
5262 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005263static QDF_STATUS ol_txrx_wrapper_peer_state_update(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005264 uint8_t *peer_mac, enum ol_txrx_peer_state state)
5265{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005266 return ol_txrx_peer_state_update(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005267 peer_mac, state);
5268}
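
/*
 * Illustrative sketch (hypothetical flow, mirroring the CONN/AUTH updates
 * done elsewhere in this file): for a secured association the control path
 * typically moves a peer through the filtering states in two steps:
 *
 *	// after (re)association: only EAPOL/WAPI frames should pass
 *	ol_txrx_wrapper_peer_state_update(pdev, peer_mac,
 *					  OL_TXRX_PEER_STATE_CONN);
 *
 *	// after the key handshake completes: all data frames may pass
 *	ol_txrx_wrapper_peer_state_update(pdev, peer_mac,
 *					  OL_TXRX_PEER_STATE_AUTH);
 */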
5269
5270/**
5271 * ol_txrx_wrapper_find_peer_by_addr() - find peer instance by address
5272 * @pdev: pdev handle
5273 * @peer_addr: peer address to find
5274 * @peer_id: peer id
5275 *
5276 * Return: peer instance pointer
5277 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005278static void *ol_txrx_wrapper_find_peer_by_addr(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005279 uint8_t *peer_addr, uint8_t *peer_id)
5280{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005281 return ol_txrx_find_peer_by_addr(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005282 peer_addr, peer_id);
5283}
5284
5285/**
5286 * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
5287 * @cfg_pdev: cfg context
5288 * @cfg_param: cfg parameters
5289 *
5290 * Return: none
5291 */
Jeff Johnsonffa9afc2016-12-19 15:34:41 -08005292static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005293ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
5294 void *cfg_param)
Leo Chang98726762016-10-28 11:07:18 -07005295{
5296	ol_tx_set_flow_control_parameters(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005297 cfg_pdev,
Leo Chang98726762016-10-28 11:07:18 -07005298 (struct txrx_pdev_cfg_param_t *)cfg_param);
5299}
5300
5301static struct cdp_cmn_ops ol_ops_cmn = {
5302 .txrx_soc_attach_target = ol_txrx_soc_attach_target,
5303 .txrx_vdev_attach = ol_txrx_vdev_attach,
5304 .txrx_vdev_detach = ol_txrx_vdev_detach,
5305 .txrx_pdev_attach = ol_txrx_pdev_attach,
5306 .txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
5307 .txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05305308 .txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
Leo Chang98726762016-10-28 11:07:18 -07005309 .txrx_pdev_detach = ol_txrx_pdev_detach,
Dhanashri Atre272fd232016-11-10 16:20:46 -08005310 .txrx_peer_create = ol_txrx_peer_attach,
5311 .txrx_peer_setup = NULL,
5312 .txrx_peer_teardown = NULL,
5313 .txrx_peer_delete = ol_txrx_peer_detach,
Leo Chang98726762016-10-28 11:07:18 -07005314 .txrx_vdev_register = ol_txrx_vdev_register,
5315 .txrx_soc_detach = ol_txrx_soc_detach,
5316 .txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
5317 .txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
5318 .txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
Krishna Kumaar Natarajan5fb9ac12016-12-06 14:28:35 -08005319 .txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
Leo Chang98726762016-10-28 11:07:18 -07005320 .txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
5321 .txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
5322 .txrx_get_tx_pending = ol_txrx_get_tx_pending,
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07005323 .flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
Venkata Sharath Chandra Manchala71032b62017-03-29 12:30:45 -07005324 .txrx_fw_stats_get = ol_txrx_fw_stats_get,
5325 .display_stats = ol_txrx_display_stats,
Leo Chang98726762016-10-28 11:07:18 -07005326 /* TODO: Add other functions */
5327};
5328
5329static struct cdp_misc_ops ol_ops_misc = {
5330 .set_ibss_vdev_heart_beat_timer =
5331 ol_txrx_set_ibss_vdev_heart_beat_timer,
5332#ifdef CONFIG_HL_SUPPORT
5333 .set_wmm_param = ol_txrx_set_wmm_param,
5334#endif /* CONFIG_HL_SUPPORT */
5335 .bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
5336 .bad_peer_txctl_update_threshold =
5337 ol_txrx_bad_peer_txctl_update_threshold,
5338 .hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
5339 .tx_non_std = ol_tx_non_std,
5340 .get_vdev_id = ol_txrx_get_vdev_id,
5341 .set_wisa_mode = ol_txrx_set_wisa_mode,
5342#ifdef FEATURE_RUNTIME_PM
5343 .runtime_suspend = ol_txrx_runtime_suspend,
5344 .runtime_resume = ol_txrx_runtime_resume,
5345#endif /* FEATURE_RUNTIME_PM */
5346 .get_opmode = ol_txrx_get_opmode,
5347 .mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
5348 .update_mac_id = ol_txrx_update_mac_id,
5349 .flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
5350 .get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
5351 .pkt_log_init = htt_pkt_log_init,
5352 .pkt_log_con_service = ol_txrx_pkt_log_con_service
5353};
5354
5355static struct cdp_flowctl_ops ol_ops_flowctl = {
5356#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5357 .register_pause_cb = ol_txrx_register_pause_cb,
5358 .set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
5359 .dump_flow_pool_info = ol_tx_dump_flow_pool_info
5360#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
5361};
5362
5363static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
5364#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
5365 .register_tx_flow_control = ol_txrx_register_tx_flow_control,
5366 .deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
5367 .flow_control_cb = ol_txrx_flow_control_cb,
5368 .get_tx_resource = ol_txrx_get_tx_resource,
5369 .ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
5370 .vdev_flush = ol_txrx_vdev_flush,
5371 .vdev_pause = ol_txrx_vdev_pause,
5372 .vdev_unpause = ol_txrx_vdev_unpause
5373#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
5374};
5375
5376static struct cdp_ipa_ops ol_ops_ipa = {
5377#ifdef IPA_OFFLOAD
5378 .ipa_get_resource = ol_txrx_ipa_uc_get_resource,
5379 .ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
5380 .ipa_set_active = ol_txrx_ipa_uc_set_active,
5381 .ipa_op_response = ol_txrx_ipa_uc_op_response,
5382 .ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
5383 .ipa_get_stat = ol_txrx_ipa_uc_get_stat,
5384 .ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
Yun Park637d6482016-10-05 10:51:33 -07005385 .ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base,
5386 .ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
5387 .ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota
Leo Chang98726762016-10-28 11:07:18 -07005388#endif /* IPA_OFFLOAD */
5389};
5390
Leo Chang98726762016-10-28 11:07:18 -07005391static struct cdp_bus_ops ol_ops_bus = {
5392 .bus_suspend = ol_txrx_bus_suspend,
5393 .bus_resume = ol_txrx_bus_resume
5394};
5395
5396static struct cdp_ocb_ops ol_ops_ocb = {
5397 .set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
5398 .get_ocb_chan_info = ol_txrx_get_ocb_chan_info
5399};
5400
5401static struct cdp_throttle_ops ol_ops_throttle = {
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005402#ifdef QCA_SUPPORT_TX_THROTTLE
Leo Chang98726762016-10-28 11:07:18 -07005403 .throttle_init_period = ol_tx_throttle_init_period,
5404 .throttle_set_level = ol_tx_throttle_set_level
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005405#endif /* QCA_SUPPORT_TX_THROTTLE */
Leo Chang98726762016-10-28 11:07:18 -07005406};
5407
5408static struct cdp_mob_stats_ops ol_ops_mob_stats = {
Leo Chang98726762016-10-28 11:07:18 -07005409 .clear_stats = ol_txrx_clear_stats,
5410 .stats = ol_txrx_stats
5411};
5412
5413static struct cdp_cfg_ops ol_ops_cfg = {
5414 .set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
5415 .set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
5416 .cfg_attach = ol_pdev_cfg_attach,
5417 .vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
5418 .is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
5419 .tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
5420 .is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
5421 .set_flow_control_parameters =
5422 ol_txrx_wrapper_set_flow_control_parameters,
5423 .set_flow_steering = ol_set_cfg_flow_steering,
5424};
5425
5426static struct cdp_peer_ops ol_ops_peer = {
5427 .register_peer = ol_txrx_wrapper_register_peer,
5428 .clear_peer = ol_txrx_clear_peer,
5429 .find_peer_by_addr = ol_txrx_wrapper_find_peer_by_addr,
5430 .find_peer_by_addr_and_vdev = ol_txrx_find_peer_by_addr_and_vdev,
5431 .local_peer_id = ol_txrx_local_peer_id,
5432 .peer_find_by_local_id = ol_txrx_wrapper_peer_find_by_local_id,
5433 .peer_state_update = ol_txrx_wrapper_peer_state_update,
5434 .get_vdevid = ol_txrx_get_vdevid,
5435 .get_vdev_by_sta_id = ol_txrx_get_vdev_by_sta_id,
5436 .register_ocb_peer = ol_txrx_register_ocb_peer,
5437 .peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
5438 .get_peer_state = ol_txrx_get_peer_state,
5439 .get_vdev_for_peer = ol_txrx_get_vdev_for_peer,
5440 .update_ibss_add_peer_num_of_vdev =
5441 ol_txrx_update_ibss_add_peer_num_of_vdev,
5442 .remove_peers_for_vdev = ol_txrx_remove_peers_for_vdev,
5443 .remove_peers_for_vdev_no_lock = ol_txrx_remove_peers_for_vdev_no_lock,
Yu Wang053d3e72017-02-08 18:48:24 +08005444#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
Leo Chang98726762016-10-28 11:07:18 -07005445 .copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
5446 .add_last_real_peer = ol_txrx_add_last_real_peer,
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005447 .is_vdev_restore_last_peer = is_vdev_restore_last_peer,
5448 .update_last_real_peer = ol_txrx_update_last_real_peer,
5449#endif /* CONFIG_HL_SUPPORT && FEATURE_WLAN_TDLS */
Leo Chang98726762016-10-28 11:07:18 -07005450 .last_assoc_received = ol_txrx_last_assoc_received,
5451 .last_disassoc_received = ol_txrx_last_disassoc_received,
5452 .last_deauth_received = ol_txrx_last_deauth_received,
Leo Chang98726762016-10-28 11:07:18 -07005453 .peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
5454};
5455
5456static struct cdp_tx_delay_ops ol_ops_delay = {
5457#ifdef QCA_COMPUTE_TX_DELAY
5458 .tx_delay = ol_tx_delay,
5459 .tx_delay_hist = ol_tx_delay_hist,
5460 .tx_packet_count = ol_tx_packet_count,
5461 .tx_set_compute_interval = ol_tx_set_compute_interval
5462#endif /* QCA_COMPUTE_TX_DELAY */
5463};
5464
5465static struct cdp_pmf_ops ol_ops_pmf = {
5466 .get_pn_info = ol_txrx_get_pn_info
5467};
5468
5469/* WINplatform specific structures */
5470static struct cdp_ctrl_ops ol_ops_ctrl = {
5471 /* EMPTY FOR MCL */
5472};
5473
5474static struct cdp_me_ops ol_ops_me = {
5475 /* EMPTY FOR MCL */
5476};
5477
5478static struct cdp_mon_ops ol_ops_mon = {
5479 /* EMPTY FOR MCL */
5480};
5481
5482static struct cdp_host_stats_ops ol_ops_host_stats = {
5483 /* EMPTY FOR MCL */
5484};
5485
5486static struct cdp_wds_ops ol_ops_wds = {
5487 /* EMPTY FOR MCL */
5488};
5489
5490static struct cdp_raw_ops ol_ops_raw = {
5491 /* EMPTY FOR MCL */
5492};
5493
5494static struct cdp_ops ol_txrx_ops = {
5495 .cmn_drv_ops = &ol_ops_cmn,
5496 .ctrl_ops = &ol_ops_ctrl,
5497 .me_ops = &ol_ops_me,
5498 .mon_ops = &ol_ops_mon,
5499 .host_stats_ops = &ol_ops_host_stats,
5500 .wds_ops = &ol_ops_wds,
5501 .raw_ops = &ol_ops_raw,
5502 .misc_ops = &ol_ops_misc,
5503 .cfg_ops = &ol_ops_cfg,
5504 .flowctl_ops = &ol_ops_flowctl,
5505 .l_flowctl_ops = &ol_ops_l_flowctl,
5506 .ipa_ops = &ol_ops_ipa,
Leo Chang98726762016-10-28 11:07:18 -07005507 .bus_ops = &ol_ops_bus,
5508 .ocb_ops = &ol_ops_ocb,
5509 .peer_ops = &ol_ops_peer,
5510 .throttle_ops = &ol_ops_throttle,
5511 .mob_stats_ops = &ol_ops_mob_stats,
5512 .delay_ops = &ol_ops_delay,
5513 .pmf_ops = &ol_ops_pmf
5514};
5515
Jeff Johnson02c37b42017-01-10 14:49:24 -08005516/*
5517 * Local prototype added to temporarily address warning caused by
5518 * -Wmissing-prototypes. A more correct solution, namely to expose
5519 * a prototype in an appropriate header file, will come later.
5520 */
5521struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
5522 struct ol_if_ops *dp_ol_if_ops);
5523struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
5524 struct ol_if_ops *dp_ol_if_ops)
Leo Chang98726762016-10-28 11:07:18 -07005525{
5526	struct cdp_soc_t *soc = qdf_mem_malloc(sizeof(struct cdp_soc_t));

5527	if (!soc) {
5528 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5529 "%s: OL SOC memory allocation failed\n", __func__);
5530 return NULL;
5531 }
5532
5533 soc->ops = &ol_txrx_ops;
5534 return soc;
5535}
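
/*
 * Illustrative usage sketch (an assumption about the platform glue, not
 * taken from this file): the SoC handle returned here is the root of the
 * CDP dispatch; callers invoke datapath functionality through the ops
 * tables registered above, e.g.:
 *
 *	struct cdp_soc_t *soc = ol_txrx_soc_attach(scn_handle, dp_ol_if_ops);
 *
 *	if (soc)
 *		soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
 *
 * The matching teardown releases the handle through the .txrx_soc_detach op
 * (ol_txrx_soc_detach() in this file).
 */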
5536
5537