Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Jeff Johnsonb9b49342016-12-19 16:46:23 -08002 * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/*=== includes ===*/
29/* header files for OS primitives */
30#include <osdep.h> /* uint32_t, etc. */
Anurag Chouhan600c3a02016-03-01 10:33:54 +053031#include <qdf_mem.h> /* qdf_mem_malloc,free */
Anurag Chouhan6d760662016-02-20 16:05:43 +053032#include <qdf_types.h> /* qdf_device_t, qdf_print */
Nirav Shahcbc6d722016-03-01 16:24:53 +053033#include <qdf_lock.h> /* qdf_spinlock */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +053034#include <qdf_atomic.h> /* qdf_atomic_read */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080035
Poddar, Siddarth27b1a602016-04-29 11:01:33 +053036#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080037/* Required for WLAN_FEATURE_FASTPATH */
38#include <ce_api.h>
Poddar, Siddarth27b1a602016-04-29 11:01:33 +053039#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080040/* header files for utilities */
41#include <cds_queue.h> /* TAILQ */
42
43/* header files for configuration API */
44#include <ol_cfg.h> /* ol_cfg_is_high_latency */
45#include <ol_if_athvar.h>
46
47/* header files for HTT API */
48#include <ol_htt_api.h>
49#include <ol_htt_tx_api.h>
50
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080051/* header files for our own APIs */
52#include <ol_txrx_api.h>
53#include <ol_txrx_dbg.h>
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -070054#include <cdp_txrx_ocb.h>
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -070055#include <ol_txrx_ctrl_api.h>
56#include <cdp_txrx_stats.h>
57#include <ol_txrx_osif_api.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080058/* header files for our internal definitions */
59#include <ol_txrx_internal.h> /* TXRX_ASSERT, etc. */
60#include <wdi_event.h> /* WDI events */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080061#include <ol_tx.h> /* ol_tx_ll */
62#include <ol_rx.h> /* ol_rx_deliver */
63#include <ol_txrx_peer_find.h> /* ol_txrx_peer_find_attach, etc. */
64#include <ol_rx_pn.h> /* ol_rx_pn_check, etc. */
65#include <ol_rx_fwd.h> /* ol_rx_fwd_check, etc. */
66#include <ol_rx_reorder_timeout.h> /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
67#include <ol_rx_reorder.h>
68#include <ol_tx_send.h> /* ol_tx_discard_target_frms */
69#include <ol_tx_desc.h> /* ol_tx_desc_frame_free */
70#include <ol_tx_queue.h>
Siddarth Poddarb2011f62016-04-27 20:45:42 +053071#include <ol_tx_sched.h> /* ol_tx_sched_attach, etc. */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080072#include <ol_txrx.h>
Manjunathappa Prakash04f26442016-10-13 14:46:49 -070073#include <ol_txrx_types.h>
Dhanashri Atreb08959a2016-03-01 17:28:03 -080074#include <cdp_txrx_flow_ctrl_legacy.h>
Jeff Johnsonf89f58f2016-10-14 09:58:29 -070075#include <cdp_txrx_bus.h>
Dhanashri Atreb08959a2016-03-01 17:28:03 -080076#include <cdp_txrx_ipa.h>
Jeff Johnsonf89f58f2016-10-14 09:58:29 -070077#include <cdp_txrx_lro.h>
78#include <cdp_txrx_pmf.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080079#include "wma.h"
Poddar, Siddarth27b1a602016-04-29 11:01:33 +053080#include "hif.h"
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -070081#include <cdp_txrx_peer_ops.h>
Komal Seelamc4b28632016-02-03 15:02:18 +053082#ifndef REMOVE_PKT_LOG
83#include "pktlog_ac.h"
84#endif
Tushnim Bhattacharyya12b48742017-03-13 12:46:45 -070085#include <wlan_policy_mgr_api.h>
Komal Seelamc4b28632016-02-03 15:02:18 +053086#include "epping_main.h"
Govind Singh8c46db92016-05-10 14:17:16 +053087#include <a_types.h>
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080088#include <cdp_txrx_handle.h>
Leo Chang98726762016-10-28 11:07:18 -070089#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
90ol_txrx_peer_handle
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080091ol_txrx_peer_find_by_local_id(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -070092 uint8_t local_peer_id);
93
94#endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080095QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -070096 uint8_t *peer_mac,
97 enum ol_txrx_peer_state state);
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080098static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *vdev,
99 bool val);
100int ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
Leo Chang98726762016-10-28 11:07:18 -0700101extern void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800102ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
Leo Chang98726762016-10-28 11:07:18 -0700103 struct ol_tx_wmm_param_t wmm_param);
Leo Chang98726762016-10-28 11:07:18 -0700104
Leo Chang98726762016-10-28 11:07:18 -0700105extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
106 uint64_t **last_pn, uint32_t **rmf_pn_replays);
107
Yu Wang053d3e72017-02-08 18:48:24 +0800108#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530109
110/**
111 * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
112 * @vdev: the data virtual device
113 * @bss_addr: bss address
114 *
115 * Return: None
116 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800117static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800118ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530119{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800120 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530121 if (bss_addr && vdev->last_real_peer &&
Ankit Guptaa5076012016-09-14 11:32:19 -0700122 !qdf_mem_cmp((u8 *)bss_addr,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530123 vdev->last_real_peer->mac_addr.raw,
Ankit Guptaa5076012016-09-14 11:32:19 -0700124 IEEE80211_ADDR_LEN))
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530125 qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
126 vdev->last_real_peer->mac_addr.raw,
127 OL_TXRX_MAC_ADDR_LEN);
128}
129
130/**
131 * ol_txrx_add_last_real_peer() - add last peer
132 * @pdev: the data physical device
133 * @vdev: virtual device
134 * @peer_id: peer id
135 *
136 * Return: None
137 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800138static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800139ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
140 struct cdp_vdev *pvdev, uint8_t *peer_id)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530141{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800142 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
143 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530144 ol_txrx_peer_handle peer;
145 if (vdev->last_real_peer == NULL) {
146 peer = NULL;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800147 peer = ol_txrx_find_peer_by_addr(
148 (struct cdp_pdev *)pdev,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530149 vdev->hl_tdls_ap_mac_addr.raw,
150 peer_id);
151 if (peer && (peer->peer_ids[0] !=
152 HTT_INVALID_PEER_ID))
153 vdev->last_real_peer = peer;
154 }
155}
156
157/**
 158 * is_vdev_restore_last_peer() - check whether the peer is the vdev's last real peer
 159 * @peer: peer object
 160 *
 161 * Return: true if the given peer is the vdev's last real peer
162 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800163static bool
Leo Chang98726762016-10-28 11:07:18 -0700164is_vdev_restore_last_peer(void *ppeer)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530165{
Leo Chang98726762016-10-28 11:07:18 -0700166 struct ol_txrx_peer_t *peer = ppeer;
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530167 struct ol_txrx_vdev_t *vdev;
168 vdev = peer->vdev;
169 return vdev->last_real_peer && (vdev->last_real_peer == peer);
170}
171
172/**
 173 * ol_txrx_update_last_real_peer() - restore the vdev's last real peer
174 * @pdev: the data physical device
175 * @peer: peer device
176 * @peer_id: peer id
177 * @restore_last_peer: restore last peer flag
178 *
179 * Return: None
180 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800181static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800182ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *ppeer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530183 uint8_t *peer_id, bool restore_last_peer)
184{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800185 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -0700186 struct ol_txrx_peer_t *peer = ppeer;
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530187 struct ol_txrx_vdev_t *vdev;
188 vdev = peer->vdev;
189 if (restore_last_peer && (vdev->last_real_peer == NULL)) {
190 peer = NULL;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800191 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530192 vdev->hl_tdls_ap_mac_addr.raw, peer_id);
193 if (peer && (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
194 vdev->last_real_peer = peer;
195 }
196}
197#endif
198
Himanshu Agarwal19141bb2016-07-20 20:15:48 +0530199/**
 200 * ol_tx_mark_first_wakeup_packet() - set flag to indicate whether the
 201 * fw supports marking the first packet after wow wakeup
202 * @value: 1 for enabled/ 0 for disabled
203 *
204 * Return: None
205 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800206static void ol_tx_mark_first_wakeup_packet(uint8_t value)
Himanshu Agarwal19141bb2016-07-20 20:15:48 +0530207{
208 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
209
210 if (!pdev) {
211 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
212 "%s: pdev is NULL\n", __func__);
213 return;
214 }
215
216 htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
217}
218
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530219u_int16_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800220ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530221{
222 u_int16_t desc_pool_size;
223 u_int16_t steady_state_tx_lifetime_ms;
224 u_int16_t safety_factor;
225
226 /*
227 * Steady-state tx latency:
228 * roughly 1-2 ms flight time
229 * + roughly 1-2 ms prep time,
230 * + roughly 1-2 ms target->host notification time.
231 * = roughly 6 ms total
232 * Thus, steady state number of frames =
233 * steady state max throughput / frame size * tx latency, e.g.
234 * 1 Gbps / 1500 bytes * 6 ms = 500
235 *
236 */
237 steady_state_tx_lifetime_ms = 6;
238
239 safety_factor = 8;
240
241 desc_pool_size =
242 ol_cfg_max_thruput_mbps(ctrl_pdev) *
243 1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
244 (8 * OL_TX_AVG_FRM_BYTES) *
245 steady_state_tx_lifetime_ms *
246 safety_factor;
247
248 /* minimum */
249 if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
250 desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;
251
252 /* maximum */
253 if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
254 desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;
255
256 return desc_pool_size;
257}
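/*
 * Illustrative arithmetic for the sizing above -- a sketch only, assuming a
 * configured max throughput of 800 Mbps and OL_TX_AVG_FRM_BYTES of 1500
 * (the actual values come from the cfg layer and the driver headers):
 *
 *   800 Mbps * 1000                = 800,000 bits per ms
 *   800,000 / (8 * 1500)           = 66 frames per ms (integer division)
 *   66 * 6 ms lifetime * 8 safety  = 3168 descriptors
 *
 * which is then clamped to the [OL_TX_DESC_POOL_SIZE_MIN_HL,
 * OL_TX_DESC_POOL_SIZE_MAX_HL] range by the checks above.
 */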
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800258
259/*=== function definitions ===*/
260
Nirav Shah22bf44d2015-12-10 15:39:48 +0530261/**
262 * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate that mgmt over
263 * wmi is enabled or not.
 264 * @value: 1 for enabled / 0 for disabled
265 *
266 * Return: None
267 */
268void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
269{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530270 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Nirav Shah22bf44d2015-12-10 15:39:48 +0530271 if (!pdev) {
Anurag Chouhan6d760662016-02-20 16:05:43 +0530272 qdf_print("%s: pdev is NULL\n", __func__);
Nirav Shah22bf44d2015-12-10 15:39:48 +0530273 return;
274 }
275 pdev->is_mgmt_over_wmi_enabled = value;
276 return;
277}
278
279/**
280 * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
281 *
282 * Return: is_mgmt_over_wmi_enabled
283 */
284uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
285{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530286 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Nirav Shah22bf44d2015-12-10 15:39:48 +0530287 if (!pdev) {
Anurag Chouhan6d760662016-02-20 16:05:43 +0530288 qdf_print("%s: pdev is NULL\n", __func__);
Nirav Shah22bf44d2015-12-10 15:39:48 +0530289 return 0;
290 }
291 return pdev->is_mgmt_over_wmi_enabled;
292}
293
294
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800295#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800296static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800297ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
298 struct cdp_vdev *pvdev, uint8_t *peer_addr, uint8_t *peer_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800299{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800300 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
301 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800302 struct ol_txrx_peer_t *peer;
303
304 peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
305 if (!peer)
306 return NULL;
307 *peer_id = peer->local_id;
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +0530308 qdf_atomic_dec(&peer->ref_cnt);
Mohit Khanna47384bc2016-08-15 15:37:05 -0700309 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
310 "%s: peer %p peer->ref_cnt %d", __func__, peer,
311 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800312 return peer;
313}
314
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800315static QDF_STATUS ol_txrx_get_vdevid(void *ppeer, uint8_t *vdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800316{
Leo Chang98726762016-10-28 11:07:18 -0700317 struct ol_txrx_peer_t *peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800318 if (!peer) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530319 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530320 "peer argument is null!!");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530321 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800322 }
323
324 *vdev_id = peer->vdev->vdev_id;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530325 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800326}
327
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800328static struct cdp_vdev *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800329{
330 struct ol_txrx_peer_t *peer = NULL;
331 ol_txrx_pdev_handle pdev = NULL;
332
333 if (sta_id >= WLAN_MAX_STA_COUNT) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530334 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800335 "Invalid sta id passed");
336 return NULL;
337 }
338
Anurag Chouhan6d760662016-02-20 16:05:43 +0530339 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800340 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530341 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530342 "PDEV not found for sta_id [%d]", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800343 return NULL;
344 }
345
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800346 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800347 if (!peer) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530348 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530349 "PEER [%d] not found", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800350 return NULL;
351 }
352
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800353 return (struct cdp_vdev *)peer->vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800354}
355
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800356void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800357 uint8_t *peer_addr,
358 uint8_t *peer_id)
359{
360 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800361 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800362
363 peer = ol_txrx_peer_find_hash_find(pdev, peer_addr, 0, 1);
364 if (!peer)
365 return NULL;
366 *peer_id = peer->local_id;
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +0530367 qdf_atomic_dec(&peer->ref_cnt);
Mohit Khanna47384bc2016-08-15 15:37:05 -0700368 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
369 "%s: peer %p peer->ref_cnt %d", __func__, peer,
370 qdf_atomic_read(&peer->ref_cnt));
Leo Chang98726762016-10-28 11:07:18 -0700371 return (void *)peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800372}
373
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800374static uint16_t ol_txrx_local_peer_id(void *ppeer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800375{
Leo Chang98726762016-10-28 11:07:18 -0700376 ol_txrx_peer_handle peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800377 return peer->local_id;
378}
379
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -0700380/**
381 * @brief Find a txrx peer handle from a peer's local ID
382 * @details
383 * The control SW typically uses the txrx peer handle to refer to the peer.
 384 * In unusual circumstances, if it is infeasible for the control SW to maintain
 385 * the txrx peer handle but it can maintain a small integer local peer ID,
 386 * this function allows the peer handle to be retrieved, based on the local
387 * peer ID.
388 *
389 * @param pdev - the data physical device object
390 * @param local_peer_id - the ID txrx assigned locally to the peer in question
391 * @return handle to the txrx peer object
392 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800393ol_txrx_peer_handle
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800394ol_txrx_peer_find_by_local_id(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800395 uint8_t local_peer_id)
396{
397 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800398 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800399 if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
400 (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
401 return NULL;
402 }
403
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530404 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800405 peer = pdev->local_peer_ids.map[local_peer_id];
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530406 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800407 return peer;
408}
409
410static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
411{
412 int i;
413
414 /* point the freelist to the first ID */
415 pdev->local_peer_ids.freelist = 0;
416
417 /* link each ID to the next one */
418 for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
419 pdev->local_peer_ids.pool[i] = i + 1;
420 pdev->local_peer_ids.map[i] = NULL;
421 }
422
423 /* link the last ID to itself, to mark the end of the list */
424 i = OL_TXRX_NUM_LOCAL_PEER_IDS;
425 pdev->local_peer_ids.pool[i] = i;
426
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530427 qdf_spinlock_create(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800428}
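/*
 * Illustrative sketch of the freelist laid out by
 * ol_txrx_local_peer_id_pool_init(), assuming OL_TXRX_NUM_LOCAL_PEER_IDS
 * were 4 (the real value is larger):
 *
 *   freelist = 0
 *   pool[0] = 1, pool[1] = 2, pool[2] = 3, pool[3] = 4
 *   pool[4] = 4          <- self-link marking the end of the list
 *   map[0..3] = NULL
 *
 * ol_txrx_local_peer_id_alloc() below pops the head ID and advances
 * freelist to pool[head]; ol_txrx_local_peer_id_free() pushes the released
 * ID back onto the head. When pool[freelist] == freelist the pool is
 * exhausted and OL_TXRX_INVALID_LOCAL_PEER_ID is handed out instead.
 */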
429
430static void
431ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
432 struct ol_txrx_peer_t *peer)
433{
434 int i;
435
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530436 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800437 i = pdev->local_peer_ids.freelist;
438 if (pdev->local_peer_ids.pool[i] == i) {
439 /* the list is empty, except for the list-end marker */
440 peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
441 } else {
442 /* take the head ID and advance the freelist */
443 peer->local_id = i;
444 pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
445 pdev->local_peer_ids.map[i] = peer;
446 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530447 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800448}
449
450static void
451ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
452 struct ol_txrx_peer_t *peer)
453{
454 int i = peer->local_id;
455 if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
456 (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
457 return;
458 }
459 /* put this ID on the head of the freelist */
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530460 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800461 pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
462 pdev->local_peer_ids.freelist = i;
463 pdev->local_peer_ids.map[i] = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530464 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800465}
466
467static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
468{
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530469 qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800470}
471
472#else
473#define ol_txrx_local_peer_id_pool_init(pdev) /* no-op */
474#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
475#define ol_txrx_local_peer_id_free(pdev, peer) /* no-op */
476#define ol_txrx_local_peer_id_cleanup(pdev) /* no-op */
477#endif
478
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530479#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
480
481/**
482 * ol_txrx_update_group_credit() - update group credit for tx queue
483 * @group: for which credit needs to be updated
484 * @credit: credits
485 * @absolute: TXQ group absolute
486 *
 487 * Return: None
488 */
489void ol_txrx_update_group_credit(
490 struct ol_tx_queue_group_t *group,
491 int32_t credit,
492 u_int8_t absolute)
493{
494 if (absolute)
495 qdf_atomic_set(&group->credit, credit);
496 else
497 qdf_atomic_add(credit, &group->credit);
498}
499
500/**
501 * ol_txrx_update_tx_queue_groups() - update vdev tx queue group if
 502 * vdev id mask and ac mask do not match
503 * @pdev: the data physical device
504 * @group_id: TXQ group id
505 * @credit: TXQ group credit count
506 * @absolute: TXQ group absolute
507 * @vdev_id_mask: TXQ vdev group id mask
 508 * @ac_mask: TXQ access category mask
509 *
510 * Return: None
511 */
512void ol_txrx_update_tx_queue_groups(
513 ol_txrx_pdev_handle pdev,
514 u_int8_t group_id,
515 int32_t credit,
516 u_int8_t absolute,
517 u_int32_t vdev_id_mask,
518 u_int32_t ac_mask
519 )
520{
521 struct ol_tx_queue_group_t *group;
522 u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
523 u_int32_t membership;
524 struct ol_txrx_vdev_t *vdev;
525 group = &pdev->txq_grps[group_id];
526
527 membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);
528
529 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
530 /*
531 * if the membership (vdev id mask and ac mask)
 532 * matches then no need to update tx queue groups.
533 */
534 if (group->membership == membership)
535 /* Update Credit Only */
536 goto credit_update;
537
538
539 /*
540 * membership (vdev id mask and ac mask) is not matching
541 * TODO: ignoring ac mask for now
542 */
543 group_vdev_id_mask =
544 OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
545
546 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
547 group_vdev_bit_mask =
548 OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
549 group_vdev_id_mask, vdev->vdev_id);
550 vdev_bit_mask =
551 OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
552 vdev_id_mask, vdev->vdev_id);
553
554 if (group_vdev_bit_mask != vdev_bit_mask) {
555 /*
556 * Change in vdev tx queue group
557 */
558 if (!vdev_bit_mask) {
559 /* Set Group Pointer (vdev and peer) to NULL */
560 ol_tx_set_vdev_group_ptr(
561 pdev, vdev->vdev_id, NULL);
562 } else {
563 /* Set Group Pointer (vdev and peer) */
564 ol_tx_set_vdev_group_ptr(
565 pdev, vdev->vdev_id, group);
566 }
567 }
568 }
569 /* Update membership */
570 group->membership = membership;
571credit_update:
572 /* Update Credit */
573 ol_txrx_update_group_credit(group, credit, absolute);
574 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
575}
576#endif
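/*
 * Usage sketch (hypothetical caller, not taken from this file): a group
 * credit update event from the target could be applied roughly as below,
 * assuming group 0, a credit delta of +32 and a vdev id mask covering
 * vdevs 0 and 1. The exact bit layout of vdev_id_mask/ac_mask is defined
 * by the OL_TXQ_GROUP_* macros elsewhere in the driver.
 *
 *   ol_txrx_update_tx_queue_groups(pdev,
 *                                  0,     group_id
 *                                  32,    credit delta
 *                                  0,     absolute == 0, i.e. add the delta
 *                                  0x3,   vdev_id_mask: vdev 0 and vdev 1
 *                                  0xf);  ac_mask: all access categories
 */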
577
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800578#ifdef WLAN_FEATURE_FASTPATH
579/**
 580 * setup_fastpath_ce_handles() - update pdev with ce_handle for fastpath use.
581 *
582 * @osc: pointer to HIF context
583 * @pdev: pointer to ol pdev
584 *
585 * Return: void
586 */
Komal Seelam3d202862016-02-24 18:43:24 +0530587static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
588 struct ol_txrx_pdev_t *pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800589{
590 /*
591 * Before the HTT attach, set up the CE handles
592 * CE handles are (struct CE_state *)
593 * This is only required in the fast path
594 */
Komal Seelam7fde14c2016-02-02 13:05:57 +0530595 pdev->ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_H2T_MSG);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800596
597}
598
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800599#else /* not WLAN_FEATURE_FASTPATH */
Komal Seelam3d202862016-02-24 18:43:24 +0530600static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
601 struct ol_txrx_pdev_t *pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800602{
603}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800604#endif /* WLAN_FEATURE_FASTPATH */
605
606#ifdef QCA_LL_TX_FLOW_CONTROL_V2
607/**
608 * ol_tx_set_desc_global_pool_size() - set global pool size
609 * @num_msdu_desc: total number of descriptors
610 *
611 * Return: none
612 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -0800613static void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800614{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530615 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800616 if (!pdev) {
Anurag Chouhan6d760662016-02-20 16:05:43 +0530617 qdf_print("%s: pdev is NULL\n", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800618 return;
619 }
Nirav Shah2ae038d2015-12-23 20:36:11 +0530620 pdev->num_msdu_desc = num_msdu_desc;
621 if (!ol_tx_get_is_mgmt_over_wmi_enabled())
622 pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
623 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Global pool size: %d\n",
624 pdev->num_msdu_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800625 return;
626}
627
628/**
629 * ol_tx_get_desc_global_pool_size() - get global pool size
630 * @pdev: pdev handle
631 *
632 * Return: global pool size
633 */
634static inline
635uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
636{
637 return pdev->num_msdu_desc;
638}
Nirav Shah55b45a02016-01-21 10:00:16 +0530639
640/**
641 * ol_tx_get_total_free_desc() - get total free descriptors
642 * @pdev: pdev handle
643 *
644 * Return: total free descriptors
645 */
646static inline
647uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
648{
649 struct ol_tx_flow_pool_t *pool = NULL;
650 uint32_t free_desc;
651
652 free_desc = pdev->tx_desc.num_free;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530653 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
Nirav Shah55b45a02016-01-21 10:00:16 +0530654 TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
655 flow_pool_list_elem) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530656 qdf_spin_lock_bh(&pool->flow_pool_lock);
Nirav Shah55b45a02016-01-21 10:00:16 +0530657 free_desc += pool->avail_desc;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530658 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Nirav Shah55b45a02016-01-21 10:00:16 +0530659 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530660 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
Nirav Shah55b45a02016-01-21 10:00:16 +0530661
662 return free_desc;
663}
664
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800665#else
666/**
667 * ol_tx_get_desc_global_pool_size() - get global pool size
668 * @pdev: pdev handle
669 *
670 * Return: global pool size
671 */
672static inline
673uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
674{
675 return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
676}
Nirav Shah55b45a02016-01-21 10:00:16 +0530677
678/**
679 * ol_tx_get_total_free_desc() - get total free descriptors
680 * @pdev: pdev handle
681 *
682 * Return: total free descriptors
683 */
684static inline
685uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
686{
687 return pdev->tx_desc.num_free;
688}
689
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800690#endif
691
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530692#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)
693
694/**
695 * ol_txrx_rsrc_threshold_lo() - set threshold low - when to start tx desc
696 * margin replenishment
697 * @desc_pool_size: tx desc pool size
698 *
699 * Return: threshold low
700 */
701static inline uint16_t
702ol_txrx_rsrc_threshold_lo(int desc_pool_size)
703{
704 int threshold_low;
705 /*
706 * 5% margin of unallocated desc is too much for per
707 * vdev mechanism.
708 * Define the value seperately.
709 */
710 threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;
711
712 return threshold_low;
713}
714
715/**
716 * ol_txrx_rsrc_threshold_hi() - set threshold high - where to stop
717 * during tx desc margin replenishment
718 * @desc_pool_size: tx desc pool size
719 *
720 * Return: threshold high
721 */
722static inline uint16_t
723ol_txrx_rsrc_threshold_hi(int desc_pool_size)
724{
725 int threshold_high;
726 /* when freeing up descriptors,
727 * keep going until there's a 7.5% margin
728 */
729 threshold_high = ((15 * desc_pool_size)/100)/2;
730
731 return threshold_high;
732}
733#else
734
735static inline uint16_t
736ol_txrx_rsrc_threshold_lo(int desc_pool_size)
737{
738 int threshold_low;
739 /* always maintain a 5% margin of unallocated descriptors */
740 threshold_low = (5 * desc_pool_size)/100;
741
742 return threshold_low;
743}
744
745static inline uint16_t
746ol_txrx_rsrc_threshold_hi(int desc_pool_size)
747{
748 int threshold_high;
749 /* when freeing up descriptors, keep going until
750 * there's a 15% margin
751 */
752 threshold_high = (15 * desc_pool_size)/100;
753
754 return threshold_high;
755}
756#endif
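/*
 * Worked example of the two thresholds -- a sketch, assuming a descriptor
 * pool of 1024 entries:
 *
 *   default (non per-vdev) case:
 *     threshold_lo = (5  * 1024) / 100       = 51  descriptors
 *     threshold_hi = (15 * 1024) / 100       = 153 descriptors
 *
 *   CONFIG_HL_SUPPORT + CONFIG_PER_VDEV_TX_DESC_POOL case:
 *     threshold_lo = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED (fixed reserve,
 *                    value defined elsewhere in the driver)
 *     threshold_hi = ((15 * 1024) / 100) / 2 = 76  descriptors
 *
 * Per the comments above, descriptor margin replenishment starts once the
 * free count drops to threshold_lo and stops when it climbs back up to
 * threshold_hi.
 */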
757
758#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
759
760/**
761 * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
762 * @pdev: the physical device object
763 *
764 * Return: None
765 */
766static void
767ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
768{
769 qdf_spinlock_create(&pdev->txq_log_spinlock);
770 pdev->txq_log.size = OL_TXQ_LOG_SIZE;
771 pdev->txq_log.oldest_record_offset = 0;
772 pdev->txq_log.offset = 0;
773 pdev->txq_log.allow_wrap = 1;
774 pdev->txq_log.wrapped = 0;
775}
776
777/**
778 * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
779 * @pdev: the physical device object
780 *
781 * Return: None
782 */
783static inline void
784ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
785{
786 qdf_spinlock_destroy(&pdev->txq_log_spinlock);
787}
788
789#else
790
791static inline void
792ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
793{
794 return;
795}
796
797static inline void
798ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
799{
800 return;
801}
802
803
804#endif
805
806#if defined(DEBUG_HL_LOGGING)
807
808/**
809 * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
810 * @pdev: the physical device object
811 *
812 * Return: None
813 */
814static inline void
815ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
816{
817 qdf_spinlock_create(&pdev->grp_stat_spinlock);
818 pdev->grp_stats.last_valid_index = -1;
819 pdev->grp_stats.wrap_around = 0;
820}
821
822/**
823 * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
824 * @pdev: the physical device object
825 *
826 * Return: None
827 */
828static inline void
829ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
830{
831 qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
832}
833#else
834
835static inline void
836ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
837{
838 return;
839}
840
841static inline void
842ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
843{
844 return;
845}
846#endif
847
848#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
849
850/**
851 * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
852 * @vdev: the virtual device object
853 * @flag: flag
854 *
855 * Return: None
856 */
857void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800858ol_txrx_hl_tdls_flag_reset(struct cdp_vdev *pvdev, bool flag)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530859{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800860 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530861 vdev->hlTdlsFlag = flag;
862}
863#endif
864
865#if defined(CONFIG_HL_SUPPORT)
866
867/**
868 * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
869 * @vdev: the virtual device object
870 *
871 * Return: None
872 */
873static void
874ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
875{
876 u_int8_t i;
877 for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
878 TAILQ_INIT(&vdev->txqs[i].head);
879 vdev->txqs[i].paused_count.total = 0;
880 vdev->txqs[i].frms = 0;
881 vdev->txqs[i].bytes = 0;
882 vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
883 vdev->txqs[i].flag = ol_tx_queue_empty;
884 /* aggregation is not applicable for vdev tx queues */
885 vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
886 ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
887 ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
888 }
889}
890
891/**
892 * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
893 * @vdev: the virtual device object
894 *
895 * Return: None
896 */
897static void
898ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
899{
900 struct ol_txrx_pdev_t *pdev = vdev->pdev;
901 struct ol_tx_frms_queue_t *txq;
902 int i;
903
904 for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
905 txq = &vdev->txqs[i];
Poddar, Siddarth74178df2016-08-09 17:32:50 +0530906 ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530907 }
908}
909
910/**
911 * ol_txrx_peer_txqs_init() - initialise peer tx queues
912 * @pdev: the physical device object
913 * @peer: peer object
914 *
915 * Return: None
916 */
917static void
918ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
919 struct ol_txrx_peer_t *peer)
920{
921 uint8_t i;
922 struct ol_txrx_vdev_t *vdev = peer->vdev;
923 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
924 for (i = 0; i < OL_TX_NUM_TIDS; i++) {
925 TAILQ_INIT(&peer->txqs[i].head);
926 peer->txqs[i].paused_count.total = 0;
927 peer->txqs[i].frms = 0;
928 peer->txqs[i].bytes = 0;
929 peer->txqs[i].ext_tid = i;
930 peer->txqs[i].flag = ol_tx_queue_empty;
931 peer->txqs[i].aggr_state = ol_tx_aggr_untried;
932 ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
933 ol_txrx_set_txq_peer(&peer->txqs[i], peer);
934 }
935 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
936
937 /* aggregation is not applicable for mgmt and non-QoS tx queues */
938 for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
939 peer->txqs[i].aggr_state = ol_tx_aggr_disabled;
940
941 ol_txrx_peer_pause(peer);
942}
943
944/**
945 * ol_txrx_peer_tx_queue_free() - free peer tx queues
946 * @pdev: the physical device object
947 * @peer: peer object
948 *
949 * Return: None
950 */
951static void
952ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
953 struct ol_txrx_peer_t *peer)
954{
955 struct ol_tx_frms_queue_t *txq;
956 uint8_t i;
957
958 for (i = 0; i < OL_TX_NUM_TIDS; i++) {
959 txq = &peer->txqs[i];
Poddar, Siddarth74178df2016-08-09 17:32:50 +0530960 ol_tx_queue_free(pdev, txq, i, true);
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530961 }
962}
963#else
964
965static inline void
966ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
967{
968 return;
969}
970
971static inline void
972ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
973{
974 return;
975}
976
977static inline void
978ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
979 struct ol_txrx_peer_t *peer)
980{
981 return;
982}
983
984static inline void
985ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
986 struct ol_txrx_peer_t *peer)
987{
988 return;
989}
990#endif
991
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530992#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
993static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
994{
995 qdf_spinlock_create(&pdev->stats.pub.tx.tso.tso_stats_lock);
996}
997
998static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
999{
1000 qdf_spinlock_destroy(&pdev->stats.pub.tx.tso.tso_stats_lock);
1001}
1002
1003static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
1004{
1005 int msdu_idx;
1006 int seg_idx;
1007
1008 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1009 "TSO Statistics:");
1010 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1011 "TSO pkts %lld, bytes %lld\n",
1012 pdev->stats.pub.tx.tso.tso_pkts.pkts,
1013 pdev->stats.pub.tx.tso.tso_pkts.bytes);
1014
1015 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1016 "TSO Histogram for numbers of segments:\n"
1017 "Single segment %d\n"
1018 " 2-5 segments %d\n"
1019 " 6-10 segments %d\n"
1020 "11-15 segments %d\n"
1021 "16-20 segments %d\n"
1022 " 20+ segments %d\n",
1023 pdev->stats.pub.tx.tso.tso_hist.pkts_1,
1024 pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
1025 pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
1026 pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
1027 pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
1028 pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);
1029
1030 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1031 "TSO History Buffer: Total size %d, current_index %d",
1032 NUM_MAX_TSO_MSDUS,
1033 TXRX_STATS_TSO_MSDU_IDX(pdev));
1034
1035 for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
1036 if (TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx) == 0)
1037 continue;
1038 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1039 "jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
1040 msdu_idx,
1041 TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
1042 TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
1043 TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
1044 TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));
1045
1046 for (seg_idx = 0;
1047 ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev,
1048 msdu_idx)) && (seg_idx < NUM_MAX_TSO_SEGS));
1049 seg_idx++) {
1050 struct qdf_tso_seg_t tso_seg =
1051 TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);
1052
1053 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1054 "seg idx: %d", seg_idx);
1055 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1056 "tso_enable: %d",
1057 tso_seg.tso_flags.tso_enable);
1058 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1059 "fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
1060 tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
1061 tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
1062 tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
1063 tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
1064 tso_seg.tso_flags.ns);
1065 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1066 "tcp_seq_num: 0x%x ip_id: %d",
1067 tso_seg.tso_flags.tcp_seq_num,
1068 tso_seg.tso_flags.ip_id);
1069 }
1070 }
1071}
1072#else
1073static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
1074{
1075 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1076 "TSO is not supported\n");
1077}
1078
1079static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
1080{
1081 /*
 1082 * keep the body empty and do not add an error print, as the print would
 1083 * show up every time during driver load if TSO is not enabled.
1084 */
1085}
1086
1087static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
1088{
1089 /*
 1090 * keep the body empty and do not add an error print, as the print would
 1091 * show up every time during driver unload if TSO is not enabled.
1092 */
1093}
1094
1095#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */
1096
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001097/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08001098 * ol_txrx_pdev_attach() - allocate txrx pdev
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001099 * @ctrl_pdev: cfg pdev
1100 * @htc_pdev: HTC pdev
1101 * @osdev: os dev
1102 *
1103 * Return: txrx pdev handle
1104 * NULL for failure
1105 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001106static struct cdp_pdev *
1107ol_txrx_pdev_attach(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
Leo Chang98726762016-10-28 11:07:18 -07001108 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001109{
1110 struct ol_txrx_pdev_t *pdev;
1111 int i;
1112
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301113 pdev = qdf_mem_malloc(sizeof(*pdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001114 if (!pdev)
1115 goto fail0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001116
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301117 /* init LL/HL cfg here */
1118 pdev->cfg.is_high_latency = ol_cfg_is_high_latency(ctrl_pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001119 pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(ctrl_pdev);
1120
1121 /* store provided params */
1122 pdev->ctrl_pdev = ctrl_pdev;
1123 pdev->osdev = osdev;
1124
1125 for (i = 0; i < htt_num_sec_types; i++)
1126 pdev->sec_types[i] = (enum ol_sec_type)i;
1127
1128 TXRX_STATS_INIT(pdev);
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301129 ol_txrx_tso_stats_init(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001130
1131 TAILQ_INIT(&pdev->vdev_list);
1132
1133 /* do initial set up of the peer ID -> peer object lookup map */
1134 if (ol_txrx_peer_find_attach(pdev))
1135 goto fail1;
1136
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301137 /* initialize the counter of the target's tx buffer availability */
1138 qdf_atomic_init(&pdev->target_tx_credit);
1139 qdf_atomic_init(&pdev->orig_target_tx_credit);
1140
1141 if (ol_cfg_is_high_latency(ctrl_pdev)) {
1142 qdf_spinlock_create(&pdev->tx_queue_spinlock);
1143 pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
1144 if (pdev->tx_sched.scheduler == NULL)
1145 goto fail2;
1146 }
1147 ol_txrx_pdev_txq_log_init(pdev);
1148 ol_txrx_pdev_grp_stats_init(pdev);
1149
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001150 pdev->htt_pdev =
1151 htt_pdev_alloc(pdev, ctrl_pdev, htc_pdev, osdev);
1152 if (!pdev->htt_pdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301153 goto fail3;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001154
Himanshu Agarwalf65bd4c2016-12-05 17:21:12 +05301155 htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
1156 ol_rx_pkt_dump_call);
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001157 return (struct cdp_pdev *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001158
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301159fail3:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001160 ol_txrx_peer_find_detach(pdev);
1161
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301162fail2:
1163 if (ol_cfg_is_high_latency(ctrl_pdev))
1164 qdf_spinlock_destroy(&pdev->tx_queue_spinlock);
1165
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001166fail1:
Himanshu Agarwal5501c192017-02-14 11:39:39 +05301167 ol_txrx_tso_stats_deinit(pdev);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301168 qdf_mem_free(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001169
1170fail0:
1171 return NULL;
1172}
1173
Komal Seelamc4b28632016-02-03 15:02:18 +05301174#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
1175/**
1176 * htt_pkt_log_init() - API to initialize packet log
1177 * @handle: pdev handle
1178 * @scn: HIF context
1179 *
1180 * Return: void
1181 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001182void htt_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
Komal Seelamc4b28632016-02-03 15:02:18 +05301183{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001184 struct ol_txrx_pdev_t *handle = (struct ol_txrx_pdev_t *)ppdev;
Komal Seelamc4b28632016-02-03 15:02:18 +05301185 if (handle->pkt_log_init)
1186 return;
1187
Anurag Chouhandf2b2682016-02-29 14:15:27 +05301188 if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
Houston Hoffman371d4a92016-04-14 17:02:37 -07001189 !QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
Komal Seelamc4b28632016-02-03 15:02:18 +05301190 ol_pl_sethandle(&handle->pl_dev, scn);
1191 if (pktlogmod_init(scn))
Anurag Chouhandf2b2682016-02-29 14:15:27 +05301192 qdf_print("%s: pktlogmod_init failed", __func__);
Komal Seelamc4b28632016-02-03 15:02:18 +05301193 else
1194 handle->pkt_log_init = true;
1195 }
1196}
1197
1198/**
1199 * htt_pktlogmod_exit() - API to cleanup pktlog info
1200 * @handle: Pdev handle
1201 * @scn: HIF Context
1202 *
1203 * Return: void
1204 */
Houston Hoffman8c485042017-02-08 13:40:21 -08001205static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
Komal Seelamc4b28632016-02-03 15:02:18 +05301206{
Houston Hoffman8c485042017-02-08 13:40:21 -08001207 if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
Houston Hoffman371d4a92016-04-14 17:02:37 -07001208 !QDF_IS_EPPING_ENABLED(cds_get_conparam()) &&
Komal Seelamc4b28632016-02-03 15:02:18 +05301209 handle->pkt_log_init) {
Houston Hoffman8c485042017-02-08 13:40:21 -08001210 pktlogmod_exit(handle);
Komal Seelamc4b28632016-02-03 15:02:18 +05301211 handle->pkt_log_init = false;
1212 }
1213}
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001214
Komal Seelamc4b28632016-02-03 15:02:18 +05301215#else
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001216void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *ol_sc) { }
Houston Hoffman8c485042017-02-08 13:40:21 -08001217static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
Komal Seelamc4b28632016-02-03 15:02:18 +05301218#endif
1219
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001220/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08001221 * ol_txrx_pdev_post_attach() - attach txrx pdev
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001222 * @pdev: txrx pdev
1223 *
1224 * Return: 0 for success
1225 */
1226int
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001227ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001228{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001229 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang376398b2015-10-23 14:19:02 -07001230 uint16_t i;
1231 uint16_t fail_idx = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001232 int ret = 0;
1233 uint16_t desc_pool_size;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301234 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001235
Leo Chang376398b2015-10-23 14:19:02 -07001236 uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
1237 union ol_tx_desc_list_elem_t *c_element;
1238 unsigned int sig_bit;
1239 uint16_t desc_per_page;
1240
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001241 if (!osc) {
1242 ret = -EINVAL;
Leo Chang376398b2015-10-23 14:19:02 -07001243 goto ol_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001244 }
1245
1246 /*
1247 * For LL, limit the number of host's tx descriptors to match
1248 * the number of target FW tx descriptors.
1249 * This simplifies the FW, by ensuring the host will never
1250 * download more tx descriptors than the target has space for.
1251 * The FW will drop/free low-priority tx descriptors when it
1252 * starts to run low, so that in theory the host should never
1253 * run out of tx descriptors.
1254 */
1255
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001256 /*
 1257 * LL - initialize the target credit ourselves.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301258 * HL - wait for a HTT target credit initialization
1259 * during htt_attach.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001260 */
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301261 if (pdev->cfg.is_high_latency) {
1262 desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001263
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301264 qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
1265 qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001266
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301267 pdev->tx_queue.rsrc_threshold_lo =
1268 ol_txrx_rsrc_threshold_lo(desc_pool_size);
1269 pdev->tx_queue.rsrc_threshold_hi =
1270 ol_txrx_rsrc_threshold_hi(desc_pool_size);
1271
1272 for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++)
1273 qdf_atomic_init(&pdev->txq_grps[i].credit);
1274
1275 ol_tx_target_credit_init(pdev, desc_pool_size);
1276 } else {
1277 qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
1278 &pdev->target_tx_credit);
1279 desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
1280 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001281
Nirav Shah76291962016-04-25 10:50:37 +05301282 ol_tx_desc_dup_detect_init(pdev, desc_pool_size);
1283
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001284 setup_fastpath_ce_handles(osc, pdev);
1285
1286 ret = htt_attach(pdev->htt_pdev, desc_pool_size);
1287 if (ret)
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301288 goto htt_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001289
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001290 /* Attach micro controller data path offload resource */
1291 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1292 if (htt_ipa_uc_attach(pdev->htt_pdev))
Leo Chang376398b2015-10-23 14:19:02 -07001293 goto uc_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001294
Leo Chang376398b2015-10-23 14:19:02 -07001295 /* Calculate single element reserved size power of 2 */
Anurag Chouhanc5548422016-02-24 18:33:27 +05301296 pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301297 qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
Leo Chang376398b2015-10-23 14:19:02 -07001298 pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
1299 if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
1300 (NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301301 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001302 "Page alloc fail");
1303 goto page_alloc_fail;
1304 }
1305 desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
1306 pdev->tx_desc.offset_filter = desc_per_page - 1;
1307 /* Calculate page divider to find page number */
1308 sig_bit = 0;
1309 while (desc_per_page) {
1310 sig_bit++;
1311 desc_per_page = desc_per_page >> 1;
1312 }
1313 pdev->tx_desc.page_divider = (sig_bit - 1);
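	/*
	 * Illustrative example of the shift/mask pair derived above -- a
	 * sketch, since the real descriptors-per-page count depends on the
	 * multi-page allocation. With 64 descriptors per page:
	 *   offset_filter = 64 - 1 = 0x3f
	 *   sig_bit ends up as 7, so page_divider = 6
	 * and a descriptor id then splits as
	 *   page   = id >> page_divider   (id >> 6)
	 *   offset = id & offset_filter   (id & 0x3f)
	 * which is how lookups such as ol_tx_desc_find() are expected to
	 * resolve a descriptor id to its page and offset.
	 */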
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301314 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001315 "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
1316 pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
1317 desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
1318 pdev->tx_desc.desc_pages.num_element_per_page);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001319
1320 /*
1321 * Each SW tx desc (used only within the tx datapath SW) has a
1322 * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
1323 * Go ahead and allocate the HTT tx desc and link it with the SW tx
1324 * desc now, to avoid doing it during time-critical transmit.
1325 */
1326 pdev->tx_desc.pool_size = desc_pool_size;
Leo Chang376398b2015-10-23 14:19:02 -07001327 pdev->tx_desc.freelist =
1328 (union ol_tx_desc_list_elem_t *)
1329 (*pdev->tx_desc.desc_pages.cacheable_pages);
1330 c_element = pdev->tx_desc.freelist;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001331 for (i = 0; i < desc_pool_size; i++) {
1332 void *htt_tx_desc;
Leo Chang376398b2015-10-23 14:19:02 -07001333 void *htt_frag_desc = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301334 qdf_dma_addr_t frag_paddr = 0;
1335 qdf_dma_addr_t paddr;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001336
Leo Chang376398b2015-10-23 14:19:02 -07001337 if (i == (desc_pool_size - 1))
1338 c_element->next = NULL;
1339 else
1340 c_element->next = (union ol_tx_desc_list_elem_t *)
1341 ol_tx_desc_find(pdev, i + 1);
1342
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001343 htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001344 if (!htt_tx_desc) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301345 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001346 "%s: failed to alloc HTT tx desc (%d of %d)",
1347 __func__, i, desc_pool_size);
Leo Chang376398b2015-10-23 14:19:02 -07001348 fail_idx = i;
1349 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001350 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001351
Leo Chang376398b2015-10-23 14:19:02 -07001352 c_element->tx_desc.htt_tx_desc = htt_tx_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001353 c_element->tx_desc.htt_tx_desc_paddr = paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001354 ret = htt_tx_frag_alloc(pdev->htt_pdev,
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001355 i, &frag_paddr, &htt_frag_desc);
Leo Chang376398b2015-10-23 14:19:02 -07001356 if (ret) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301357 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Leo Chang376398b2015-10-23 14:19:02 -07001358 "%s: failed to alloc HTT frag dsc (%d/%d)",
1359 __func__, i, desc_pool_size);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001360 /* Is there a leak here, is this handling correct? */
Leo Chang376398b2015-10-23 14:19:02 -07001361 fail_idx = i;
1362 goto desc_alloc_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001363 }
Leo Chang376398b2015-10-23 14:19:02 -07001364 if (!ret && htt_frag_desc) {
 1365 /* Initialize the first 6 words (TSO flags)
 1366 * of the frag descriptor */
1367 memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
1368 c_element->tx_desc.htt_frag_desc = htt_frag_desc;
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001369 c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
Leo Chang376398b2015-10-23 14:19:02 -07001370 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001371#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
Leo Chang376398b2015-10-23 14:19:02 -07001372 c_element->tx_desc.pkt_type = 0xff;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001373#ifdef QCA_COMPUTE_TX_DELAY
Leo Chang376398b2015-10-23 14:19:02 -07001374 c_element->tx_desc.entry_timestamp_ticks =
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001375 0xffffffff;
1376#endif
1377#endif
Leo Chang376398b2015-10-23 14:19:02 -07001378 c_element->tx_desc.id = i;
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301379 qdf_atomic_init(&c_element->tx_desc.ref_cnt);
Leo Chang376398b2015-10-23 14:19:02 -07001380 c_element = c_element->next;
1381 fail_idx = i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001382 }
1383
1384 /* link SW tx descs into a freelist */
1385 pdev->tx_desc.num_free = desc_pool_size;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001386 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
1387 "%s first tx_desc:0x%p Last tx desc:0x%p\n", __func__,
1388 (uint32_t *) pdev->tx_desc.freelist,
1389 (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001390
1391 /* check what format of frames are expected to be delivered by the OS */
1392 pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
1393 if (pdev->frame_format == wlan_frm_fmt_native_wifi)
1394 pdev->htt_pkt_type = htt_pkt_type_native_wifi;
1395 else if (pdev->frame_format == wlan_frm_fmt_802_3) {
1396 if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
1397 pdev->htt_pkt_type = htt_pkt_type_eth2;
1398 else
1399 pdev->htt_pkt_type = htt_pkt_type_ethernet;
1400 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301401 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001402 "%s Invalid standard frame type: %d",
1403 __func__, pdev->frame_format);
Leo Chang376398b2015-10-23 14:19:02 -07001404 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001405 }
1406
1407 /* setup the global rx defrag waitlist */
1408 TAILQ_INIT(&pdev->rx.defrag.waitlist);
1409
1410 /* configure where defrag timeout and duplicate detection is handled */
1411 pdev->rx.flags.defrag_timeout_check =
1412 pdev->rx.flags.dup_check =
1413 ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);
1414
1415#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
 1416 /* Need to revisit this part. Currently hardcoded to Riva's caps. */
1417 pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
1418 pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
1419 /*
 1420 * The Riva HW de-aggregation doesn't have the capability to generate the
 1421 * 802.11 header for non-first subframes of an A-MSDU.
1422 */
1423 pdev->sw_subfrm_hdr_recovery_enable = 1;
1424 /*
 1425 * The Riva HW doesn't have the capability to set the Protected Frame bit
 1426 * in the MAC header for encrypted data frames.
1427 */
1428 pdev->sw_pf_proc_enable = 1;
1429
1430 if (pdev->frame_format == wlan_frm_fmt_802_3) {
 1431 /* SW LLC processing is only needed in the
 1432 * 802.3 to 802.11 transform case */
1433 pdev->sw_tx_llc_proc_enable = 1;
1434 pdev->sw_rx_llc_proc_enable = 1;
1435 } else {
1436 pdev->sw_tx_llc_proc_enable = 0;
1437 pdev->sw_rx_llc_proc_enable = 0;
1438 }
1439
1440 switch (pdev->frame_format) {
1441 case wlan_frm_fmt_raw:
1442 pdev->sw_tx_encap =
1443 pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
1444 ? 0 : 1;
1445 pdev->sw_rx_decap =
1446 pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
1447 ? 0 : 1;
1448 break;
1449 case wlan_frm_fmt_native_wifi:
1450 pdev->sw_tx_encap =
1451 pdev->
1452 target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
1453 ? 0 : 1;
1454 pdev->sw_rx_decap =
1455 pdev->
1456 target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
1457 ? 0 : 1;
1458 break;
1459 case wlan_frm_fmt_802_3:
1460 pdev->sw_tx_encap =
1461 pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
1462 ? 0 : 1;
1463 pdev->sw_rx_decap =
1464 pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
1465 ? 0 : 1;
1466 break;
1467 default:
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301468 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001469 "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
1470 pdev->frame_format,
1471 pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
Leo Chang376398b2015-10-23 14:19:02 -07001472 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001473 }
1474#endif
1475
1476 /*
1477 * Determine what rx processing steps are done within the host.
1478 * Possibilities:
1479 * 1. Nothing - rx->tx forwarding and rx PN entirely within target.
1480 * (This is unlikely; even if the target is doing rx->tx forwarding,
1481 * the host should be doing rx->tx forwarding too, as a back up for
1482 * the target's rx->tx forwarding, in case the target runs short on
1483 * memory, and can't store rx->tx frames that are waiting for
1484 * missing prior rx frames to arrive.)
1485 * 2. Just rx -> tx forwarding.
1486 * This is the typical configuration for HL, and a likely
1487 * configuration for LL STA or small APs (e.g. retail APs).
1488 * 3. Both PN check and rx -> tx forwarding.
1489 * This is the typical configuration for large LL APs.
1490 * Host-side PN check without rx->tx forwarding is not a valid
1491 * configuration, since the PN check needs to be done prior to
1492 * the rx->tx forwarding.
1493 */
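	/*
	 * Quick reference for the handler selection implemented below
	 * (derived from the branches that follow):
	 *   full reorder offload, rx->tx fwd disabled -> ol_rx_in_order_deliver
	 *   full reorder offload, rx->tx fwd enabled  -> ol_rx_fwd_check
	 *   host PN check, rx->tx fwd disabled        -> ol_rx_pn_check_only
	 *   host PN check, host rx->tx fwd check      -> ol_rx_pn_check
	 *   target PN check, host rx->tx fwd check    -> ol_rx_fwd_check
	 *   target PN check, no host fwd check        -> ol_rx_deliver
	 */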
1494 if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
 1495 /* PN check, rx->tx forwarding and rx reorder are done by
 1496 * the target */
1497 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
1498 pdev->rx_opt_proc = ol_rx_in_order_deliver;
1499 else
1500 pdev->rx_opt_proc = ol_rx_fwd_check;
1501 } else {
1502 if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
1503 if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
1504 /*
1505 * PN check done on host,
1506 * rx->tx forwarding not done at all.
1507 */
1508 pdev->rx_opt_proc = ol_rx_pn_check_only;
1509 } else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1510 /*
1511 * Both PN check and rx->tx forwarding done
1512 * on host.
1513 */
1514 pdev->rx_opt_proc = ol_rx_pn_check;
1515 } else {
 1516#define TRACESTR01 "invalid config: if rx PN check is on the host, "\
1517"rx->tx forwarding check needs to also be on the host"
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301518 QDF_TRACE(QDF_MODULE_ID_TXRX,
1519 QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001520 "%s: %s", __func__, TRACESTR01);
1521#undef TRACESTR01
Leo Chang376398b2015-10-23 14:19:02 -07001522 goto control_init_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001523 }
1524 } else {
1525 /* PN check done on target */
1526 if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
1527 ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1528 /*
1529 * rx->tx forwarding done on host (possibly as
1530 * back-up for target-side primary rx->tx
1531 * forwarding)
1532 */
1533 pdev->rx_opt_proc = ol_rx_fwd_check;
1534 } else {
1535 /* rx->tx forwarding either done in target,
1536 * or not done at all */
1537 pdev->rx_opt_proc = ol_rx_deliver;
1538 }
1539 }
1540 }
1541
1542 /* initialize mutexes for tx desc alloc and peer lookup */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301543 qdf_spinlock_create(&pdev->tx_mutex);
1544 qdf_spinlock_create(&pdev->peer_ref_mutex);
1545 qdf_spinlock_create(&pdev->rx.mutex);
1546 qdf_spinlock_create(&pdev->last_real_peer_mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001547 qdf_spinlock_create(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001548 OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
1549
1550 if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK)
Leo Chang376398b2015-10-23 14:19:02 -07001551 goto reorder_trace_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001552
1553 if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK)
Leo Chang376398b2015-10-23 14:19:02 -07001554 goto pn_trace_attach_fail;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001555
1556#ifdef PERE_IP_HDR_ALIGNMENT_WAR
1557 pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
1558#endif
1559
1560 /*
1561 * WDI event attach
1562 */
1563 wdi_event_attach(pdev);
1564
1565 /*
1566 * Initialize rx PN check characteristics for different security types.
1567 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301568 qdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001569
1570 /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
1571 pdev->rx_pn[htt_sec_type_tkip].len =
1572 pdev->rx_pn[htt_sec_type_tkip_nomic].len =
1573 pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
1574 pdev->rx_pn[htt_sec_type_tkip].cmp =
1575 pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
1576 pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
1577
1578 /* WAPI: 128-bit PN */
1579 pdev->rx_pn[htt_sec_type_wapi].len = 128;
1580 pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
1581
1582 OL_RX_REORDER_TIMEOUT_INIT(pdev);
1583
1584 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1, "Created pdev %p\n", pdev);
1585
1586 pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
1587
1588#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
1589#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
1590
1591/* #if 1 -- TODO: clean this up */
1592#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
1593 /* avg = 100% * new + 0% * old */ \
1594 (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
1595/*
1596#else
1597#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
 1598 //avg = 25% * new + 75% * old
1599 (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
1600#endif
1601*/
1602 pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
1603 pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
1604#endif
1605
1606 ol_txrx_local_peer_id_pool_init(pdev);
1607
1608 pdev->cfg.ll_pause_txq_limit =
1609 ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
1610
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301611 /* TX flow control for peers in very bad link status */
1612 ol_tx_badpeer_flow_cl_init(pdev);
1613
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001614#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301615 qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301616 qdf_spinlock_create(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001617
1618 /* initialize compute interval with 5 seconds (ESE default) */
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301619 pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001620 {
1621 uint32_t bin_width_1000ticks;
1622 bin_width_1000ticks =
Anurag Chouhan50220ce2016-02-18 20:11:33 +05301623 qdf_system_msecs_to_ticks
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001624 (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
1625 * 1000);
1626 /*
1627 * Compute a factor and shift that together are equal to the
1628 * inverse of the bin_width time, so that rather than dividing
1629 * by the bin width time, approximately the same result can be
1630 * obtained much more efficiently by a multiply + shift.
1631 * multiply_factor >> shift = 1 / bin_width_time, so
1632 * multiply_factor = (1 << shift) / bin_width_time.
1633 *
1634 * Pick the shift semi-arbitrarily.
1635 * If we knew statically what the bin_width would be, we could
1636 * choose a shift that minimizes the error.
1637 * Since the bin_width is determined dynamically, simply use a
1638 * shift that is about half of the uint32_t size. This should
1639 * result in a relatively large multiplier value, which
1640 * minimizes error from rounding the multiplier to an integer.
1641 * The rounding error only becomes significant if the tick units
1642 * are on the order of 1 microsecond. In most systems, it is
1643 * expected that the tick units will be relatively low-res,
1644 * on the order of 1 millisecond. In such systems the rounding
1645 * error is negligible.
1646 * It would be more accurate to dynamically try out different
1647 * shifts and choose the one that results in the smallest
1648 * rounding error, but that extra level of fidelity is
1649 * not needed.
1650 */
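		/*
		 * Worked example (illustrative numbers only, assuming 1 ms
		 * ticks and a 10 ms internal bin width): bin_width_1000ticks
		 * is 10 * 1000 = 10000, so with shift = 16 the multiplier
		 * below is ((1 << 16) * 1000 + 5000) / 10000 = 6554.
		 * A delay of 250 ticks then maps to bin
		 * (250 * 6554) >> 16 = 25, i.e. approximately 250 / 10,
		 * as intended.
		 */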
1651 pdev->tx_delay.hist_internal_bin_width_shift = 16;
1652 pdev->tx_delay.hist_internal_bin_width_mult =
1653 ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
1654 1000 + (bin_width_1000ticks >> 1)) /
1655 bin_width_1000ticks;
1656 }
1657#endif /* QCA_COMPUTE_TX_DELAY */
1658
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001659 /* Thermal Mitigation */
1660 ol_tx_throttle_init(pdev);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001661
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001662 ol_tso_seg_list_init(pdev, desc_pool_size);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001663
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301664 ol_tso_num_seg_list_init(pdev, desc_pool_size);
1665
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001666 ol_tx_register_flow_control(pdev);
1667
1668 return 0; /* success */
1669
Leo Chang376398b2015-10-23 14:19:02 -07001670pn_trace_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001671 OL_RX_REORDER_TRACE_DETACH(pdev);
1672
Leo Chang376398b2015-10-23 14:19:02 -07001673reorder_trace_attach_fail:
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301674 qdf_spinlock_destroy(&pdev->tx_mutex);
1675 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1676 qdf_spinlock_destroy(&pdev->rx.mutex);
1677 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301678 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001679 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1680
Leo Chang376398b2015-10-23 14:19:02 -07001681control_init_fail:
1682desc_alloc_fail:
1683 for (i = 0; i < fail_idx; i++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001684 htt_tx_desc_free(pdev->htt_pdev,
Leo Chang376398b2015-10-23 14:19:02 -07001685 (ol_tx_desc_find(pdev, i))->htt_tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001686
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301687 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001688 &pdev->tx_desc.desc_pages, 0, true);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001689
Leo Chang376398b2015-10-23 14:19:02 -07001690page_alloc_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001691 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1692 htt_ipa_uc_detach(pdev->htt_pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001693uc_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001694 htt_detach(pdev->htt_pdev);
Himanshu Agarwalf8f43a72017-03-24 15:27:29 +05301695htt_attach_fail:
1696 ol_tx_desc_dup_detect_deinit(pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001697ol_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001698 return ret; /* fail */
1699}
1700
Dhanashri Atre12a08392016-02-17 13:10:34 -08001701/**
1702 * ol_txrx_pdev_attach_target() - send target configuration
1703 *
1704 * @pdev - the physical device being initialized
1705 *
 1706 * The majority of the data SW setup is done by the pdev_attach
1707 * functions, but this function completes the data SW setup by
1708 * sending datapath configuration messages to the target.
1709 *
 1710 * Return: 0 - success, 1 - failure
1711 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001712static A_STATUS ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001713{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001714 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Dhanashri Atre12a08392016-02-17 13:10:34 -08001715 return htt_attach_target(pdev->htt_pdev) == A_OK ? 0:1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001716}
1717
Dhanashri Atre12a08392016-02-17 13:10:34 -08001718/**
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301719 * ol_txrx_pdev_pre_detach() - detach the data SW state
Dhanashri Atre12a08392016-02-17 13:10:34 -08001720 * @pdev - the data physical device object being removed
1721 * @force - delete the pdev (and its vdevs and peers) even if
1722 * there are outstanding references by the target to the vdevs
1723 * and peers within the pdev
1724 *
1725 * This function is used when the WLAN driver is being removed to
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301726 * detach the host data component within the driver.
Dhanashri Atre12a08392016-02-17 13:10:34 -08001727 *
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301728 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08001729 */
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301730static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001731{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001732 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001733 int i;
Leo Chang376398b2015-10-23 14:19:02 -07001734
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001735 /* preconditions */
1736 TXRX_ASSERT2(pdev);
1737
1738 /* check that the pdev has no vdevs allocated */
1739 TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
1740
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001741#ifdef QCA_SUPPORT_TX_THROTTLE
1742 /* Thermal Mitigation */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301743 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
1744 qdf_timer_free(&pdev->tx_throttle.phase_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001745#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301746 qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1747 qdf_timer_free(&pdev->tx_throttle.tx_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001748#endif
1749#endif
Leo Chang376398b2015-10-23 14:19:02 -07001750 ol_tso_seg_list_deinit(pdev);
Poddar, Siddarth3f1fb132017-01-12 17:25:52 +05301751 ol_tso_num_seg_list_deinit(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001752
1753 if (force) {
1754 /*
1755 * The assertion above confirms that all vdevs within this pdev
1756 * were detached. However, they may not have actually been
1757 * deleted.
1758 * If the vdev had peers which never received a PEER_UNMAP msg
1759 * from the target, then there are still zombie peer objects,
1760 * and the vdev parents of the zombie peers are also zombies,
1761 * hanging around until their final peer gets deleted.
1762 * Go through the peer hash table and delete any peers left.
1763 * As a side effect, this will complete the deletion of any
1764 * vdevs that are waiting for their peers to finish deletion.
1765 */
1766 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1, "Force delete for pdev %p\n",
1767 pdev);
1768 ol_txrx_peer_find_hash_erase(pdev);
1769 }
1770
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301771 /* to get flow pool status before freeing descs */
1772 ol_tx_dump_flow_pool_info();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001773
1774 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1775 void *htt_tx_desc;
Leo Chang376398b2015-10-23 14:19:02 -07001776 struct ol_tx_desc_t *tx_desc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001777
Leo Chang376398b2015-10-23 14:19:02 -07001778 tx_desc = ol_tx_desc_find(pdev, i);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001779 /*
1780 * Confirm that each tx descriptor is "empty", i.e. it has
1781 * no tx frame attached.
1782 * In particular, check that there are no frames that have
1783 * been given to the target to transmit, for which the
1784 * target has never provided a response.
1785 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301786 if (qdf_atomic_read(&tx_desc->ref_cnt)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001787 TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
1788 "Warning: freeing tx frame (no compltn)\n");
1789 ol_tx_desc_frame_free_nonstd(pdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001790 tx_desc, 1);
1791 }
Leo Chang376398b2015-10-23 14:19:02 -07001792 htt_tx_desc = tx_desc->htt_tx_desc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001793 htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
1794 }
1795
Himanshu Agarwal749e0f22016-10-26 21:12:59 +05301796 ol_tx_deregister_flow_control(pdev);
 1797 /* Stop the communication between HTT and target first */
1798 htt_detach_target(pdev->htt_pdev);
1799
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301800 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001801 &pdev->tx_desc.desc_pages, 0, true);
1802 pdev->tx_desc.freelist = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001803
 1804 /* Detach microcontroller data path offload resource */
1805 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1806 htt_ipa_uc_detach(pdev->htt_pdev);
1807
1808 htt_detach(pdev->htt_pdev);
Nirav Shah76291962016-04-25 10:50:37 +05301809 ol_tx_desc_dup_detect_deinit(pdev);
1810
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301811 qdf_spinlock_destroy(&pdev->tx_mutex);
1812 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1813 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
1814 qdf_spinlock_destroy(&pdev->rx.mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001815 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001816#ifdef QCA_SUPPORT_TX_THROTTLE
1817 /* Thermal Mitigation */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301818 qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001819#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301820
 1821 /* TX flow control for peers in very bad link status */
1822 ol_tx_badpeer_flow_cl_deinit(pdev);
1823
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001824 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1825
1826 OL_RX_REORDER_TRACE_DETACH(pdev);
1827 OL_RX_PN_TRACE_DETACH(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301828
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001829 /*
1830 * WDI event detach
1831 */
1832 wdi_event_detach(pdev);
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301833
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001834 ol_txrx_local_peer_id_cleanup(pdev);
1835
1836#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301837 qdf_spinlock_destroy(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001838#endif
Houston Hoffmane5ec0492017-01-30 12:28:32 -08001839 qdf_mem_free(ppdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001840}
1841
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05301842/**
1843 * ol_txrx_pdev_detach() - delete the data SW state
1844 * @ppdev - the data physical device object being removed
1845 * @force - delete the pdev (and its vdevs and peers) even if
1846 * there are outstanding references by the target to the vdevs
1847 * and peers within the pdev
1848 *
1849 * This function is used when the WLAN driver is being removed to
1850 * remove the host data component within the driver.
1851 * All virtual devices within the physical device need to be deleted
1852 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
1853 *
1854 * Return: None
1855 */
1856static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
1857{
1858 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
1859
1860 /*checking to ensure txrx pdev structure is not NULL */
1861 if (!pdev) {
1862 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
1863 "NULL pdev passed to %s\n", __func__);
1864 return;
1865 }
1866
1867 htt_pktlogmod_exit(pdev);
1868
1869 OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
1870
1871 if (pdev->cfg.is_high_latency)
1872 ol_tx_sched_detach(pdev);
1873
1874 htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
1875
1876 htt_pdev_free(pdev->htt_pdev);
1877 ol_txrx_peer_find_detach(pdev);
1878 ol_txrx_tso_stats_deinit(pdev);
1879
1880 ol_txrx_pdev_txq_log_destroy(pdev);
1881 ol_txrx_pdev_grp_stat_destroy(pdev);
1882}
1883
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301884#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
1885
1886/**
1887 * ol_txrx_vdev_tx_desc_cnt_init() - initialise tx descriptor count for vdev
1888 * @vdev: the virtual device object
1889 *
1890 * Return: None
1891 */
1892static inline void
1893ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
1894{
1895 qdf_atomic_init(&vdev->tx_desc_count);
1896}
1897#else
1898
1899static inline void
1900ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
1901{
1902 return;
1903}
1904#endif
1905
Dhanashri Atre12a08392016-02-17 13:10:34 -08001906/**
1907 * ol_txrx_vdev_attach - Allocate and initialize the data object
1908 * for a new virtual device.
1909 *
1910 * @data_pdev - the physical device the virtual device belongs to
1911 * @vdev_mac_addr - the MAC address of the virtual device
1912 * @vdev_id - the ID used to identify the virtual device to the target
1913 * @op_mode - whether this virtual device is operating as an AP,
1914 * an IBSS, or a STA
1915 *
1916 * Return: success: handle to new data vdev object, failure: NULL
1917 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001918static struct cdp_vdev *
1919ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001920 uint8_t *vdev_mac_addr,
1921 uint8_t vdev_id, enum wlan_op_mode op_mode)
1922{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001923 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001924 struct ol_txrx_vdev_t *vdev;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001925 QDF_STATUS qdf_status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001926
1927 /* preconditions */
1928 TXRX_ASSERT2(pdev);
1929 TXRX_ASSERT2(vdev_mac_addr);
1930
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301931 vdev = qdf_mem_malloc(sizeof(*vdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001932 if (!vdev)
1933 return NULL; /* failure */
1934
1935 /* store provided params */
1936 vdev->pdev = pdev;
1937 vdev->vdev_id = vdev_id;
1938 vdev->opmode = op_mode;
1939
1940 vdev->delete.pending = 0;
1941 vdev->safemode = 0;
1942 vdev->drop_unenc = 1;
1943 vdev->num_filters = 0;
Himanshu Agarwal5ac2f7b2016-05-06 20:08:10 +05301944 vdev->fwd_tx_packets = 0;
1945 vdev->fwd_rx_packets = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001946
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301947 ol_txrx_vdev_tx_desc_cnt_init(vdev);
1948
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301949 qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001950 OL_TXRX_MAC_ADDR_LEN);
1951
1952 TAILQ_INIT(&vdev->peer_list);
1953 vdev->last_real_peer = NULL;
1954
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001955 ol_txrx_hl_tdls_flag_reset((struct cdp_vdev *)vdev, false);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301956
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001957#ifdef QCA_IBSS_SUPPORT
1958 vdev->ibss_peer_num = 0;
1959 vdev->ibss_peer_heart_beat_timer = 0;
1960#endif
1961
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301962 ol_txrx_vdev_txqs_init(vdev);
1963
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301964 qdf_spinlock_create(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001965 vdev->ll_pause.paused_reason = 0;
1966 vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
1967 vdev->ll_pause.txq.depth = 0;
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301968 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001969 &vdev->ll_pause.timer,
1970 ol_tx_vdev_ll_pause_queue_send, vdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301971 QDF_TIMER_TYPE_SW);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301972 qdf_atomic_init(&vdev->os_q_paused);
1973 qdf_atomic_set(&vdev->os_q_paused, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001974 vdev->tx_fl_lwm = 0;
1975 vdev->tx_fl_hwm = 0;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001976 vdev->rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001977 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301978 qdf_spinlock_create(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001979 vdev->osif_flow_control_cb = NULL;
1980 vdev->osif_fc_ctx = NULL;
1981
1982 /* Default MAX Q depth for every VDEV */
1983 vdev->ll_pause.max_q_depth =
1984 ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001985 qdf_status = qdf_event_create(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001986 /* add this vdev into the pdev's list */
1987 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
1988
1989 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
1990 "Created vdev %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
1991 vdev,
1992 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
1993 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
1994 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
1995
1996 /*
1997 * We've verified that htt_op_mode == wlan_op_mode,
1998 * so no translation is needed.
1999 */
2000 htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
2001
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002002 return (struct cdp_vdev *)vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002003}
2004
Dhanashri Atre12a08392016-02-17 13:10:34 -08002005/**
 2006 * ol_txrx_vdev_register() - Link a vdev's data object with the
2007 * matching OS shim vdev object.
2008 *
 2009 * @pvdev: the virtual device's data object
 2010 * @osif_vdev: the virtual device's OS shim object
 2011 * @txrx_ops: (pointers to) functions used for tx and rx data transfer
2012 *
2013 * The data object for a virtual device is created by the
2014 * function ol_txrx_vdev_attach. However, rather than fully
2015 * linking the data vdev object with the vdev objects from the
2016 * other subsystems that the data vdev object interacts with,
2017 * the txrx_vdev_attach function focuses primarily on creating
2018 * the data vdev object. After the creation of both the data
2019 * vdev object and the OS shim vdev object, this
 2020 * ol_txrx_vdev_register function is used to connect the two
2021 * vdev objects, so the data SW can use the OS shim vdev handle
2022 * when passing rx data received by a vdev up to the OS shim.
2023 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002024static void ol_txrx_vdev_register(struct cdp_vdev *pvdev,
2025 void *osif_vdev,
2026 struct ol_txrx_ops *txrx_ops)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002027{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002028 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002029 if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
2030 qdf_print("%s: vdev/txrx_ops is NULL!\n", __func__);
2031 qdf_assert(0);
2032 return;
2033 }
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002034
Dhanashri Atre41c0d282016-06-28 14:09:59 -07002035 vdev->osif_dev = osif_vdev;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002036 vdev->rx = txrx_ops->rx.rx;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08002037 txrx_ops->tx.tx = ol_tx_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002038}
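
/*
 * Illustrative registration sequence (a sketch; the shim-side names
 * my_shim_rx_cb and my_shim_ctx are hypothetical, not part of this driver):
 *
 *	struct ol_txrx_ops ops = { 0 };
 *	struct cdp_vdev *vdev;
 *
 *	vdev = ol_txrx_vdev_attach(pdev, mac, vdev_id, wlan_op_mode_sta);
 *	ops.rx.rx = my_shim_rx_cb;
 *	ol_txrx_vdev_register(vdev, my_shim_ctx, &ops);
 *	// on return, ops.tx.tx points to ol_tx_data for the shim to transmit with
 */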
2039
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002040#ifdef currently_unused
Dhanashri Atre12a08392016-02-17 13:10:34 -08002041/**
2042 * ol_txrx_set_curchan - Setup the current operating channel of
2043 * the device
2044 * @pdev - the data physical device object
2045 * @chan_mhz - the channel frequency (mhz) packets on
2046 *
2047 * Mainly used when populating monitor mode status that requires
2048 * the current operating channel
2049 *
2050 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002051void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz)
2052{
2053 return;
2054}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002055#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002056
2057void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
2058{
2059 vdev->safemode = val;
2060}
2061
Dhanashri Atre12a08392016-02-17 13:10:34 -08002062/**
2063 * ol_txrx_set_privacy_filters - set the privacy filter
2064 * @vdev - the data virtual device object
2065 * @filter - filters to be set
2066 * @num - the number of filters
2067 *
 2068 * Rx related. Set the privacy filters. When receiving packets, check
 2069 * the ether type, filter type and packet type to decide whether
 2070 * to discard these packets.
2071 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002072static void
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002073ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
2074 void *filters, uint32_t num)
2075{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302076 qdf_mem_copy(vdev->privacy_filters, filters,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002077 num * sizeof(struct privacy_exemption));
2078 vdev->num_filters = num;
2079}
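
/*
 * Example call (a sketch; the filter contents are placeholders and the
 * struct privacy_exemption layout is defined elsewhere in this driver):
 *
 *	struct privacy_exemption filters[1];
 *
 *	// fill filters[0] with, e.g., an EAPOL exemption
 *	ol_txrx_set_privacy_filters(vdev, filters, 1);
 */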
2080
2081void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
2082{
2083 vdev->drop_unenc = val;
2084}
2085
gbian016a42e2017-03-01 18:49:11 +08002086#if defined(CONFIG_HL_SUPPORT)
2087
2088static void
2089ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2090{
2091 struct ol_txrx_pdev_t *pdev = vdev->pdev;
2092 int i;
2093 struct ol_tx_desc_t *tx_desc;
2094
2095 qdf_spin_lock_bh(&pdev->tx_mutex);
2096 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2097 tx_desc = ol_tx_desc_find(pdev, i);
2098 if (tx_desc->vdev == vdev)
2099 tx_desc->vdev = NULL;
2100 }
2101 qdf_spin_unlock_bh(&pdev->tx_mutex);
2102}
2103
2104#else
2105
2106static void
2107ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2108{
2109
2110}
2111
2112#endif
2113
Dhanashri Atre12a08392016-02-17 13:10:34 -08002114/**
2115 * ol_txrx_vdev_detach - Deallocate the specified data virtual
2116 * device object.
2117 * @data_vdev: data object for the virtual device in question
2118 * @callback: function to call (if non-NULL) once the vdev has
2119 * been wholly deleted
2120 * @callback_context: context to provide in the callback
2121 *
2122 * All peers associated with the virtual device need to be deleted
2123 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
2124 * However, for the peers to be fully deleted, the peer deletion has to
2125 * percolate through the target data FW and back up to the host data SW.
2126 * Thus, even though the host control SW may have issued a peer_detach
2127 * call for each of the vdev's peers, the peer objects may still be
2128 * allocated, pending removal of all references to them by the target FW.
2129 * In this case, though the vdev_detach function call will still return
2130 * immediately, the vdev itself won't actually be deleted, until the
2131 * deletions of all its peers complete.
2132 * The caller can provide a callback function pointer to be notified when
2133 * the vdev deletion actually happens - whether it's directly within the
2134 * vdev_detach call, or if it's deferred until all in-progress peer
2135 * deletions have completed.
2136 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002137static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002138ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002139 ol_txrx_vdev_delete_cb callback, void *context)
2140{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002141 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002142 struct ol_txrx_pdev_t *pdev = vdev->pdev;
2143
2144 /* preconditions */
2145 TXRX_ASSERT2(vdev);
2146
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302147 ol_txrx_vdev_tx_queue_free(vdev);
2148
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302149 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05302150 qdf_timer_stop(&vdev->ll_pause.timer);
2151 qdf_timer_free(&vdev->ll_pause.timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002152 vdev->ll_pause.is_q_timer_on = false;
2153 while (vdev->ll_pause.txq.head) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302154 qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
2155 qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
2156 qdf_nbuf_unmap(pdev->osdev, vdev->ll_pause.txq.head,
Anurag Chouhandf2b2682016-02-29 14:15:27 +05302157 QDF_DMA_TO_DEVICE);
Nirav Shahcbc6d722016-03-01 16:24:53 +05302158 qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002159 vdev->ll_pause.txq.head = next;
2160 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302161 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
2162 qdf_spinlock_destroy(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002163
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302164 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002165 vdev->osif_flow_control_cb = NULL;
2166 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302167 qdf_spin_unlock_bh(&vdev->flow_control_lock);
2168 qdf_spinlock_destroy(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002169
2170 /* remove the vdev from its parent pdev's list */
2171 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
2172
2173 /*
2174 * Use peer_ref_mutex while accessing peer_list, in case
2175 * a peer is in the process of being removed from the list.
2176 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302177 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002178 /* check that the vdev has no peers allocated */
2179 if (!TAILQ_EMPTY(&vdev->peer_list)) {
2180 /* debug print - will be removed later */
2181 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302182 "%s: not deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002183 __func__, vdev,
2184 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2185 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2186 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2187 /* indicate that the vdev needs to be deleted */
2188 vdev->delete.pending = 1;
2189 vdev->delete.callback = callback;
2190 vdev->delete.context = context;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302191 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002192 return;
2193 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302194 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002195 qdf_event_destroy(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002196
2197 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
2198 "%s: deleting vdev obj %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
2199 __func__, vdev,
2200 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
2201 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
2202 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
2203
2204 htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
2205
2206 /*
gbian016a42e2017-03-01 18:49:11 +08002207 * ol_tx_desc_free might access stale vdev contents referred to by a
 2208 * tx desc, since this vdev might be detached asynchronously in
 2209 * another thread.
2210 *
2211 * Go through tx desc pool to set corresponding tx desc's vdev to NULL
2212 * when detach this vdev, and add vdev checking in the ol_tx_desc_free
2213 * to avoid crash.
2214 *
2215 */
2216 ol_txrx_tx_desc_reset_vdev(vdev);
2217
2218 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002219 * Doesn't matter if there are outstanding tx frames -
2220 * they will be freed once the target sends a tx completion
2221 * message for them.
2222 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302223 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002224 if (callback)
2225 callback(context);
2226}
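
/*
 * Illustrative caller pattern for the deferred-deletion callback
 * (a sketch; the callback and context names are hypothetical):
 *
 *	static void my_vdev_deleted(void *context)
 *	{
 *		// e.g. signal the waiting thread that the vdev is fully gone
 *	}
 *
 *	ol_txrx_vdev_detach(vdev, my_vdev_deleted, my_context);
 *	// the callback runs immediately if no peer deletions are pending,
 *	// otherwise once the last pending peer unmap from the target completes
 */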
2227
2228/**
2229 * ol_txrx_flush_rx_frames() - flush cached rx frames
2230 * @peer: peer
2231 * @drop: set flag to drop frames
2232 *
2233 * Return: None
2234 */
2235void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302236 bool drop)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002237{
2238 struct ol_rx_cached_buf *cache_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302239 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002240 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002241
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302242 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
2243 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002244 return;
2245 }
2246
Dhanashri Atre182b0272016-02-17 15:35:07 -08002247 qdf_assert(peer->vdev);
2248
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302249 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre182b0272016-02-17 15:35:07 -08002250
Dhanashri Atre50141c52016-04-07 13:15:29 -07002251 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
Dhanashri Atre182b0272016-02-17 15:35:07 -08002252 data_rx = peer->vdev->rx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002253 else
2254 drop = true;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302255 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002256
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302257 qdf_spin_lock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002258 cache_buf = list_entry((&peer->cached_bufq)->next,
2259 typeof(*cache_buf), list);
2260 while (!list_empty(&peer->cached_bufq)) {
2261 list_del(&cache_buf->list);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302262 qdf_spin_unlock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002263 if (drop) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302264 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002265 } else {
2266 /* Flush the cached frames to HDD */
Dhanashri Atre182b0272016-02-17 15:35:07 -08002267 ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302268 if (ret != QDF_STATUS_SUCCESS)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302269 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002270 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302271 qdf_mem_free(cache_buf);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302272 qdf_spin_lock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002273 cache_buf = list_entry((&peer->cached_bufq)->next,
2274 typeof(*cache_buf), list);
2275 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302276 qdf_spin_unlock_bh(&peer->bufq_lock);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302277 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002278}
2279
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07002280static void ol_txrx_flush_cache_rx_queue(void)
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302281{
2282 uint8_t sta_id;
2283 struct ol_txrx_peer_t *peer;
2284 struct ol_txrx_pdev_t *pdev;
2285
2286 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
2287 if (!pdev)
2288 return;
2289
2290 for (sta_id = 0; sta_id < WLAN_MAX_STA_COUNT; sta_id++) {
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002291 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
2292 sta_id);
Poddar, Siddartha78cac32016-12-29 20:08:34 +05302293 if (!peer)
2294 continue;
2295 ol_txrx_flush_rx_frames(peer, 1);
2296 }
2297}
2298
Dhanashri Atre12a08392016-02-17 13:10:34 -08002299/**
2300 * ol_txrx_peer_attach - Allocate and set up references for a
2301 * data peer object.
 2302 * @pvdev - data virtual device object that will directly
 2303 * own the data_peer object
 2304 * @peer_mac_addr - MAC address of the new peer
2307 *
2308 * When an association with a peer starts, the host's control SW
2309 * uses this function to inform the host data SW.
2310 * The host data SW allocates its own peer object, and stores a
2311 * reference to the control peer object within the data peer object.
2312 * The host data SW also stores a reference to the virtual device
2313 * that the peer is associated with. This virtual device handle is
2314 * used when the data SW delivers rx data frames to the OS shim layer.
2315 * The host data SW returns a handle to the new peer data object,
2316 * so a reference within the control peer object can be set to the
2317 * data peer object.
2318 *
2319 * Return: handle to new data peer object, or NULL if the attach
2320 * fails
2321 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002322static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002323ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002324{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002325 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002326 struct ol_txrx_peer_t *peer;
2327 struct ol_txrx_peer_t *temp_peer;
2328 uint8_t i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002329 bool wait_on_deletion = false;
2330 unsigned long rc;
Dhanashri Atre12a08392016-02-17 13:10:34 -08002331 struct ol_txrx_pdev_t *pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002332
2333 /* preconditions */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002334 TXRX_ASSERT2(vdev);
2335 TXRX_ASSERT2(peer_mac_addr);
2336
Dhanashri Atre12a08392016-02-17 13:10:34 -08002337 pdev = vdev->pdev;
2338 TXRX_ASSERT2(pdev);
2339
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302340 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002341 /* check for duplicate exsisting peer */
2342 TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2343 if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
2344 (union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
2345 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
2346 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exsist.\n",
2347 vdev->vdev_id,
2348 peer_mac_addr[0], peer_mac_addr[1],
2349 peer_mac_addr[2], peer_mac_addr[3],
2350 peer_mac_addr[4], peer_mac_addr[5]);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302351 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002352 vdev->wait_on_peer_id = temp_peer->local_id;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002353 qdf_event_reset(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002354 wait_on_deletion = true;
2355 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302356 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002357 return NULL;
2358 }
2359 }
2360 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302361 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002362
2363 if (wait_on_deletion) {
2364 /* wait for peer deletion */
Anurag Chouhance0dc992016-02-16 18:18:03 +05302365 rc = qdf_wait_single_event(&vdev->wait_delete_comp,
Prakash Manjunathappad3ccca22016-05-05 19:23:19 -07002366 PEER_DELETION_TIMEOUT);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002367 if (QDF_STATUS_SUCCESS != rc) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002368 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002369 "error waiting for peer(%d) deletion, status %d\n",
2370 vdev->wait_on_peer_id, (int) rc);
Deepak Dhamdhere4835a5b2016-10-07 18:08:52 -07002371 /* Added for debugging only */
2372 QDF_BUG(0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002373 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2374 return NULL;
2375 }
2376 }
2377
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302378 peer = qdf_mem_malloc(sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002379 if (!peer)
2380 return NULL; /* failure */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002381
2382 /* store provided params */
2383 peer->vdev = vdev;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302384 qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002385 OL_TXRX_MAC_ADDR_LEN);
2386
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302387 ol_txrx_peer_txqs_init(pdev, peer);
2388
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002389 INIT_LIST_HEAD(&peer->cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302390 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002391 /* add this peer into the vdev's list */
2392 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302393 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002394 /* check whether this is a real peer (peer mac addr != vdev mac addr) */
2395 if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr))
2396 vdev->last_real_peer = peer;
2397
2398 peer->rx_opt_proc = pdev->rx_opt_proc;
2399
2400 ol_rx_peer_init(pdev, peer);
2401
2402 /* initialize the peer_id */
2403 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2404 peer->peer_ids[i] = HTT_INVALID_PEER;
2405
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302406 qdf_spinlock_create(&peer->peer_info_lock);
2407 qdf_spinlock_create(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002408
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302409 qdf_atomic_init(&peer->delete_in_progress);
2410 qdf_atomic_init(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002411
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302412 qdf_atomic_init(&peer->ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002413
2414 /* keep one reference for attach */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302415 qdf_atomic_inc(&peer->ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002416
Prakash Dhavali0d3f1d62016-11-20 23:48:24 -08002417 /*
2418 * Set a flag to indicate peer create is pending in firmware and
2419 * increment ref_cnt so that peer will not get deleted while
2420 * peer create command is pending in firmware.
2421 * First peer_map event from firmware signifies successful
2422 * peer creation and it will be decremented in peer_map handling.
2423 */
2424 qdf_atomic_init(&peer->fw_create_pending);
2425 qdf_atomic_set(&peer->fw_create_pending, 1);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302426 qdf_atomic_inc(&peer->ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002427
Prakash Dhavali0d3f1d62016-11-20 23:48:24 -08002428
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002429 peer->valid = 1;
2430
2431 ol_txrx_peer_find_hash_add(pdev, peer);
2432
Mohit Khanna47384bc2016-08-15 15:37:05 -07002433 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2434 "vdev %p created peer %p ref_cnt %d (%02x:%02x:%02x:%02x:%02x:%02x)\n",
2435 vdev, peer, qdf_atomic_read(&peer->ref_cnt),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002436 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2437 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2438 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2439 /*
 2440 * For every peer MAP message, check and set bss_peer if applicable
2441 */
Ankit Guptaa5076012016-09-14 11:32:19 -07002442 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
2443 OL_TXRX_MAC_ADDR_LEN))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002444 peer->bss_peer = 1;
2445
2446 /*
2447 * The peer starts in the "disc" state while association is in progress.
2448 * Once association completes, the peer will get updated to "auth" state
2449 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2450 * or else to the "conn" state. For non-open mode, the peer will
2451 * progress to "auth" state once the authentication completes.
2452 */
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002453 peer->state = OL_TXRX_PEER_STATE_INVALID;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002454 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002455 OL_TXRX_PEER_STATE_DISC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002456
2457#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2458 peer->rssi_dbm = HTT_RSSI_INVALID;
2459#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002460 if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2461 !pdev->self_peer) {
2462 pdev->self_peer = peer;
2463 /*
2464 * No Tx in monitor mode, otherwise results in target assert.
2465 * Setting disable_intrabss_fwd to true
2466 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002467 ol_vdev_rx_set_intrabss_fwd((struct cdp_vdev *)vdev, true);
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002468 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002469
2470 ol_txrx_local_peer_id_alloc(pdev, peer);
2471
Leo Chang98726762016-10-28 11:07:18 -07002472 return (void *)peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002473}
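
/*
 * Illustrative control-path sequence (a sketch, not code in this file):
 *
 *	void *peer = ol_txrx_peer_attach(vdev, peer_mac);  // starts in DISC state
 *	// ... once association/authentication completes ...
 *	ol_txrx_peer_state_update(pdev, peer_mac, OL_TXRX_PEER_STATE_AUTH);
 */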
2474
2475/*
2476 * Discarding tx filter - removes all data frames (disconnected state)
2477 */
2478static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2479{
2480 return A_ERROR;
2481}
2482
2483/*
 2484 * Non-authentication tx filter - filters out data frames that are not
2485 * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2486 * data frames (connected state)
2487 */
2488static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2489{
2490 return
2491 (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2492 tx_msdu_info->htt.info.ethertype ==
2493 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2494}
2495
2496/*
2497 * Pass-through tx filter - lets all data frames through (authenticated state)
2498 */
2499static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2500{
2501 return A_OK;
2502}
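
/*
 * These filters correspond to the peer states described in
 * ol_txrx_peer_attach(): discard while disconnected, non-auth
 * (EAPOL/WAI only) while connected but not yet authenticated, and
 * pass-through once authenticated.
 */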
2503
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002504/**
2505 * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2506 * @peer: handle to peer
2507 *
 2508 * Returns the MAC address for modules which do not know the peer type
2509 *
2510 * Return: the mac_addr from peer
2511 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002512static uint8_t *
Leo Chang98726762016-10-28 11:07:18 -07002513ol_txrx_peer_get_peer_mac_addr(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002514{
Leo Chang98726762016-10-28 11:07:18 -07002515 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002516 if (!peer)
2517 return NULL;
2518
2519 return peer->mac_addr.raw;
2520}
2521
Abhishek Singhcfb44482017-03-10 12:42:37 +05302522#ifdef WLAN_FEATURE_11W
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002523/**
2524 * ol_txrx_get_pn_info() - Returns pn info from peer
2525 * @peer: handle to peer
2526 * @last_pn_valid: return last_rmf_pn_valid value from peer.
2527 * @last_pn: return last_rmf_pn value from peer.
2528 * @rmf_pn_replays: return rmf_pn_replays value from peer.
2529 *
2530 * Return: NONE
2531 */
2532void
Leo Chang98726762016-10-28 11:07:18 -07002533ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002534 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2535{
Leo Chang98726762016-10-28 11:07:18 -07002536 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002537 *last_pn_valid = &peer->last_rmf_pn_valid;
2538 *last_pn = &peer->last_rmf_pn;
2539 *rmf_pn_replays = &peer->rmf_pn_replays;
2540}
Abhishek Singhcfb44482017-03-10 12:42:37 +05302541#else
2542void
2543ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
2544 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2545{
2546}
2547#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002548
2549/**
2550 * ol_txrx_get_opmode() - Return operation mode of vdev
2551 * @vdev: vdev handle
2552 *
2553 * Return: operation mode.
2554 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002555static int ol_txrx_get_opmode(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002556{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002557 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002558 return vdev->opmode;
2559}
2560
2561/**
2562 * ol_txrx_get_peer_state() - Return peer state of peer
2563 * @peer: peer handle
2564 *
2565 * Return: return peer state
2566 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002567static int ol_txrx_get_peer_state(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002568{
Leo Chang98726762016-10-28 11:07:18 -07002569 ol_txrx_peer_handle peer = ppeer;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002570 return peer->state;
2571}
2572
2573/**
2574 * ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
2575 * @peer: peer handle
2576 *
2577 * Return: vdev handle from peer
2578 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002579static struct cdp_vdev *ol_txrx_get_vdev_for_peer(void *ppeer)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002580{
Leo Chang98726762016-10-28 11:07:18 -07002581 ol_txrx_peer_handle peer = ppeer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002582 return (struct cdp_vdev *)peer->vdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002583}
2584
2585/**
2586 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
2587 * @vdev: vdev handle
2588 *
2589 * Return: vdev mac address
2590 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002591static uint8_t *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002592ol_txrx_get_vdev_mac_addr(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002593{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002594 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002595 if (!vdev)
2596 return NULL;
2597
2598 return vdev->mac_addr.raw;
2599}
2600
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002601#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002602/**
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002603 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002604 * vdev
2605 * @vdev: vdev handle
2606 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002607 * Return: Handle to struct qdf_mac_addr
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002608 */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002609struct qdf_mac_addr *
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002610ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
2611{
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002612 return (struct qdf_mac_addr *)&(vdev->mac_addr);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002613}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002614#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002615
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002616#ifdef currently_unused
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002617/**
2618 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
2619 * @vdev: vdev handle
2620 *
2621 * Return: Handle to pdev
2622 */
2623ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
2624{
2625 return vdev->pdev;
2626}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08002627#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002628
2629/**
2630 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
2631 * @vdev: vdev handle
2632 *
2633 * Return: Handle to control pdev
2634 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002635static struct cdp_cfg *
2636ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002637{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002638 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
2639 return vdev->pdev->ctrl_pdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002640}
2641
2642/**
2643 * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
2644 * @vdev: vdev handle
2645 *
2646 * Return: Rx Fwd disabled status
2647 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002648static uint8_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002649ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002650{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002651 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002652 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
2653 vdev->pdev->ctrl_pdev;
2654 return cfg->rx_fwd_disabled;
2655}
2656
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002657#ifdef QCA_IBSS_SUPPORT
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002658/**
2659 * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
2660 * @vdev: vdev handle
2661 * @peer_num_delta: delta to apply to the vdev's IBSS peer count
2662 *
2663 * Return: OL_TXRX_INVALID_NUM_PEERS for failure, or the total peer count after adjustment.
2664 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002665static int16_t
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002666ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002667 int16_t peer_num_delta)
2668{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002669 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002670 int16_t new_peer_num;
2671
2672 new_peer_num = vdev->ibss_peer_num + peer_num_delta;
Naveen Rawatc45d1622016-07-05 12:20:09 -07002673 if (new_peer_num > MAX_PEERS || new_peer_num < 0)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002674 return OL_TXRX_INVALID_NUM_PEERS;
2675
2676 vdev->ibss_peer_num = new_peer_num;
2677
2678 return new_peer_num;
2679}
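/*
 * Illustrative usage sketch (compiled out; the WLAN_TXRX_USAGE_EXAMPLES
 * guard is hypothetical): shows the error-check convention expected by
 * callers of the IBSS peer-count helper above.
 */
#ifdef WLAN_TXRX_USAGE_EXAMPLES
static bool ol_txrx_example_ibss_add_one_peer(struct cdp_vdev *pvdev)
{
	int16_t new_num;

	/* account for one newly added IBSS peer */
	new_num = ol_txrx_update_ibss_add_peer_num_of_vdev(pvdev, 1);

	/* OL_TXRX_INVALID_NUM_PEERS means the adjusted count was rejected */
	return new_num != OL_TXRX_INVALID_NUM_PEERS;
}
#endif /* WLAN_TXRX_USAGE_EXAMPLES */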
2680
2681/**
2682 * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
2683 * beat timer
2684 * @vdev: vdev handle
2685 * @timer_value_sec: new heart beat timer value
2686 *
2687 * Return: Old timer value set in vdev.
2688 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002689static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_vdev *pvdev,
2690 uint16_t timer_value_sec)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002691{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002692 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002693 uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
2694
2695 vdev->ibss_peer_heart_beat_timer = timer_value_sec;
2696
2697 return old_timer_value;
2698}
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002699#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002700
2701/**
2702 * ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
2703 * @vdev: vdev handle
2704 * @callback: callback function to remove the peer.
2705 * @callback_context: handle for callback function
2706 * @remove_last_peer: whether the last (self) peer should also be removed.
2707 *
2708 * Return: NONE
2709 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002710static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002711ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002712 ol_txrx_vdev_peer_remove_cb callback,
2713 void *callback_context, bool remove_last_peer)
2714{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002715 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002716 ol_txrx_peer_handle peer, temp;
2717 /* remove all remote peers for vdev */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002718 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002719
2720 temp = NULL;
2721 TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
2722 peer_list_elem) {
2723 if (temp) {
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002724 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
2725 if (qdf_atomic_read(&temp->delete_in_progress) == 0) {
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002726 callback(callback_context, temp->mac_addr.raw,
2727 vdev->vdev_id, temp, false);
2728 }
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002729 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002730 }
2731 /* self peer is deleted last */
2732 if (peer == TAILQ_FIRST(&vdev->peer_list)) {
2733 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
2734 "%s: self peer removed by caller ",
2735 __func__);
2736 break;
2737 } else
2738 temp = peer;
2739 }
2740
Mohit Khanna137b97d2016-04-21 16:11:33 -07002741 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
2742
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002743 if (remove_last_peer) {
2744 /* remove IBSS bss peer last */
2745 peer = TAILQ_FIRST(&vdev->peer_list);
2746 callback(callback_context, (uint8_t *) &vdev->mac_addr,
2747 vdev->vdev_id, peer, false);
2748 }
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002749}
2750
2751/**
2752 * ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
2753 * @vdev: vdev handle
2754 * @callback: callback function to remove the peer.
2755 * @callback_context: handle for callback function
2756 *
2757 * Return: NONE
2758 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002759static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002760ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002761 ol_txrx_vdev_peer_remove_cb callback,
2762 void *callback_context)
2763{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002764 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002765 ol_txrx_peer_handle peer = NULL;
2766
2767 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2768 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
2769 "%s: peer found for vdev id %d. deleting the peer",
2770 __func__, vdev->vdev_id);
2771 callback(callback_context, (uint8_t *)&vdev->mac_addr,
2772 vdev->vdev_id, peer, false);
2773 }
2774}
2775
2776/**
2777 * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
2778 * @vdev: vdev handle
2779 * @ocb_set_chan: OCB channel information to be set in vdev.
2780 *
2781 * Return: NONE
2782 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002783static void ol_txrx_set_ocb_chan_info(struct cdp_vdev *pvdev,
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002784 struct ol_txrx_ocb_set_chan ocb_set_chan)
2785{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002786 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002787 vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
2788 vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
2789}
2790
2791/**
2792 * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
2793 * @vdev: vdev handle
2794 *
2795 * Return: handle to struct ol_txrx_ocb_chan_info
2796 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08002797static struct ol_txrx_ocb_chan_info *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002798ol_txrx_get_ocb_chan_info(struct cdp_vdev *pvdev)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002799{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002800 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002801 return vdev->ocb_channel_info;
2802}
2803
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002804/**
2805 * @brief specify the peer's authentication state
2806 * @details
2807 * Specify the peer's authentication state (none, connected, authenticated)
2808 * to allow the data SW to determine whether to filter out invalid data frames.
2809 * (In the "connected" state, where security is enabled, but authentication
2810 * has not completed, tx and rx data frames other than EAPOL or WAPI should
2811 * be discarded.)
2812 * This function is only relevant for systems in which the tx and rx filtering
2813 * are done in the host rather than in the target.
2814 *
2815 * @param ppdev - data pdev handle; @param peer_mac - which peer has changed its state
2816 * @param state - the new state of the peer
2817 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002818 * Return: QDF Status
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002819 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002820QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002821 uint8_t *peer_mac,
2822 enum ol_txrx_peer_state state)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002823{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002824 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002825 struct ol_txrx_peer_t *peer;
2826
Anurag Chouhanc5548422016-02-24 18:33:27 +05302827 if (qdf_unlikely(!pdev)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002828 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Pdev is NULL");
Anurag Chouhanc5548422016-02-24 18:33:27 +05302829 qdf_assert(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302830 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002831 }
2832
2833 peer = ol_txrx_peer_find_hash_find(pdev, peer_mac, 0, 1);
2834 if (NULL == peer) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302835 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
2836 "%s: peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
2837 __func__,
2838 peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
2839 peer_mac[4], peer_mac[5]);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302840 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002841 }
2842
2843 /* TODO: Should we send WMI command of the connection state? */
2844 /* avoid multiple auth state change. */
2845 if (peer->state == state) {
2846#ifdef TXRX_PRINT_VERBOSE_ENABLE
2847 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO3,
2848 "%s: no state change, returns directly\n",
2849 __func__);
2850#endif
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302851 qdf_atomic_dec(&peer->ref_cnt);
Mohit Khanna47384bc2016-08-15 15:37:05 -07002852 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2853 "%s: peer %p peer->ref_cnt %d", __func__, peer,
2854 qdf_atomic_read(&peer->ref_cnt));
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302855 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002856 }
2857
2858 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "%s: change from %d to %d\n",
2859 __func__, peer->state, state);
2860
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002861 peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002862 ? ol_tx_filter_pass_thru
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002863 : ((state == OL_TXRX_PEER_STATE_CONN)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002864 ? ol_tx_filter_non_auth
2865 : ol_tx_filter_discard);
2866
2867 if (peer->vdev->pdev->cfg.host_addba) {
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002868 if (state == OL_TXRX_PEER_STATE_AUTH) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002869 int tid;
2870 /*
2871 * Pause all regular (non-extended) TID tx queues until
2872 * data arrives and ADDBA negotiation has completed.
2873 */
2874 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
2875 "%s: pause peer and unpause mgmt/non-qos\n",
2876 __func__);
2877 ol_txrx_peer_pause(peer); /* pause all tx queues */
2878 /* unpause mgmt and non-QoS tx queues */
2879 for (tid = OL_TX_NUM_QOS_TIDS;
2880 tid < OL_TX_NUM_TIDS; tid++)
2881 ol_txrx_peer_tid_unpause(peer, tid);
2882 }
2883 }
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302884 qdf_atomic_dec(&peer->ref_cnt);
Mohit Khanna47384bc2016-08-15 15:37:05 -07002885 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2886 "%s: peer %p peer->ref_cnt %d", __func__, peer,
2887 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002888	/* Set the state after the Pause to avoid the race condition
2889	   with the ADDBA check in the tx path */
2890 peer->state = state;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302891 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002892}
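/*
 * Illustrative usage sketch (compiled out; WLAN_TXRX_USAGE_EXAMPLES is a
 * hypothetical guard): how a control-path caller might walk a peer through
 * the conn/auth transitions described above. The pdev handle and peer MAC
 * are assumed to come from the caller's own context.
 */
#ifdef WLAN_TXRX_USAGE_EXAMPLES
static void ol_txrx_example_authorize_peer(struct cdp_pdev *ppdev,
					   uint8_t *peer_mac)
{
	/* security enabled but not yet authenticated: only EAPOL/WAPI pass */
	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_CONN);

	/* ... key exchange completes in the control path ... */

	/* keys installed: all data frames may now pass the tx/rx filters */
	ol_txrx_peer_state_update(ppdev, peer_mac, OL_TXRX_PEER_STATE_AUTH);
}
#endif /* WLAN_TXRX_USAGE_EXAMPLES */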
2893
2894void
2895ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
2896{
2897 peer->keyinstalled = val;
2898}
2899
2900void
2901ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
2902 uint8_t *peer_mac,
2903 union ol_txrx_peer_update_param_t *param,
2904 enum ol_txrx_peer_update_select_t select)
2905{
2906 struct ol_txrx_peer_t *peer;
2907
2908 peer = ol_txrx_peer_find_hash_find(vdev->pdev, peer_mac, 0, 1);
2909 if (!peer) {
2910 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "%s: peer is null",
2911 __func__);
2912 return;
2913 }
2914
2915 switch (select) {
2916 case ol_txrx_peer_update_qos_capable:
2917 {
2918		/* save qos_capable in the txrx peer here,
2919		 * and save it again when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO arrives.
2920 */
2921 peer->qos_capable = param->qos_capable;
2922 /*
2923 * The following function call assumes that the peer has a
2924 * single ID. This is currently true, and
2925 * is expected to remain true.
2926 */
2927 htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
2928 peer->peer_ids[0],
2929 peer->qos_capable);
2930 break;
2931 }
2932 case ol_txrx_peer_update_uapsdMask:
2933 {
2934 peer->uapsd_mask = param->uapsd_mask;
2935 htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
2936 peer->peer_ids[0],
2937 peer->uapsd_mask);
2938 break;
2939 }
2940 case ol_txrx_peer_update_peer_security:
2941 {
2942 enum ol_sec_type sec_type = param->sec_type;
2943 enum htt_sec_type peer_sec_type = htt_sec_type_none;
2944
2945 switch (sec_type) {
2946 case ol_sec_type_none:
2947 peer_sec_type = htt_sec_type_none;
2948 break;
2949 case ol_sec_type_wep128:
2950 peer_sec_type = htt_sec_type_wep128;
2951 break;
2952 case ol_sec_type_wep104:
2953 peer_sec_type = htt_sec_type_wep104;
2954 break;
2955 case ol_sec_type_wep40:
2956 peer_sec_type = htt_sec_type_wep40;
2957 break;
2958 case ol_sec_type_tkip:
2959 peer_sec_type = htt_sec_type_tkip;
2960 break;
2961 case ol_sec_type_tkip_nomic:
2962 peer_sec_type = htt_sec_type_tkip_nomic;
2963 break;
2964 case ol_sec_type_aes_ccmp:
2965 peer_sec_type = htt_sec_type_aes_ccmp;
2966 break;
2967 case ol_sec_type_wapi:
2968 peer_sec_type = htt_sec_type_wapi;
2969 break;
2970 default:
2971 peer_sec_type = htt_sec_type_none;
2972 break;
2973 }
2974
2975 peer->security[txrx_sec_ucast].sec_type =
2976 peer->security[txrx_sec_mcast].sec_type =
2977 peer_sec_type;
2978
2979 break;
2980 }
2981 default:
2982 {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05302983 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002984 "ERROR: unknown param %d in %s", select,
2985 __func__);
2986 break;
2987 }
2988 }
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302989 qdf_atomic_dec(&peer->ref_cnt);
Mohit Khanna47384bc2016-08-15 15:37:05 -07002990 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2991 "%s: peer %p peer->ref_cnt %d", __func__, peer,
2992 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002993}
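/*
 * Illustrative usage sketch (compiled out; WLAN_TXRX_USAGE_EXAMPLES is a
 * hypothetical guard): updating a single peer attribute through
 * ol_txrx_peer_update(), here the U-APSD mask.
 */
#ifdef WLAN_TXRX_USAGE_EXAMPLES
static void ol_txrx_example_set_uapsd_mask(ol_txrx_vdev_handle vdev,
					   uint8_t *peer_mac,
					   uint8_t uapsd_mask)
{
	union ol_txrx_peer_update_param_t param;

	param.uapsd_mask = uapsd_mask;
	/* looks up the peer by MAC, applies the mask and pushes it to HTT */
	ol_txrx_peer_update(vdev, peer_mac, &param,
			    ol_txrx_peer_update_uapsdMask);
}
#endif /* WLAN_TXRX_USAGE_EXAMPLES */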
2994
2995uint8_t
2996ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
2997{
2998
2999 struct ol_txrx_peer_t *peer;
3000 peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3001 if (peer)
3002 return peer->uapsd_mask;
3003 return 0;
3004}
3005
3006uint8_t
3007ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3008{
3009
3010 struct ol_txrx_peer_t *peer_t =
3011 ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3012 if (peer_t != NULL)
3013 return peer_t->qos_capable;
3014 return 0;
3015}
3016
3017void ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer)
3018{
3019 struct ol_txrx_vdev_t *vdev;
3020 struct ol_txrx_pdev_t *pdev;
3021 int i;
3022
3023 /* preconditions */
3024 TXRX_ASSERT2(peer);
3025
3026 vdev = peer->vdev;
3027 if (NULL == vdev) {
3028 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
3029 "The vdev is not present anymore\n");
3030 return;
3031 }
3032
3033 pdev = vdev->pdev;
3034 if (NULL == pdev) {
3035 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
3036 "The pdev is not present anymore\n");
3037 return;
3038 }
3039
3040 /*
3041 * Check for the reference count before deleting the peer
3042	 * as we have noticed that this function is sometimes
3043	 * re-entered, which leads to a dead-lock.
3044 * (A double-free should never happen, so assert if it does.)
3045 */
3046
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303047 if (0 == qdf_atomic_read(&(peer->ref_cnt))) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003048 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
3049 "The Peer is not present anymore\n");
Anurag Chouhanc5548422016-02-24 18:33:27 +05303050 qdf_assert(0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003051 return;
3052 }
3053
3054 /*
3055 * Hold the lock all the way from checking if the peer ref count
3056 * is zero until the peer references are removed from the hash
3057 * table and vdev list (if the peer ref count is zero).
3058 * This protects against a new HL tx operation starting to use the
3059 * peer object just after this function concludes it's done being used.
3060 * Furthermore, the lock needs to be held while checking whether the
3061 * vdev's list of peers is empty, to make sure that list is not modified
3062 * concurrently with the empty check.
3063 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303064 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003065
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07003066 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003067 u_int16_t peer_id;
3068
yeshwanth sriram guntukae83d8ff2017-02-07 12:18:18 +05303069 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003070 "Deleting peer %p (%pM) ref_cnt %d\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003071 peer,
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003072 peer->mac_addr.raw,
3073 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003074
3075 peer_id = peer->local_id;
3076 /* remove the reference to the peer from the hash table */
3077 ol_txrx_peer_find_hash_remove(pdev, peer);
3078
3079 /* remove the peer from its parent vdev's list */
3080 TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
3081
3082 /* cleanup the Rx reorder queues for this peer */
3083 ol_rx_peer_cleanup(vdev, peer);
3084
3085 /* peer is removed from peer_list */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303086 qdf_atomic_set(&peer->delete_in_progress, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003087
3088 /*
3089 * Set wait_delete_comp event if the current peer id matches
3090 * with registered peer id.
3091 */
3092 if (peer_id == vdev->wait_on_peer_id) {
Anurag Chouhance0dc992016-02-16 18:18:03 +05303093 qdf_event_set(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003094 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3095 }
3096
3097 /* check whether the parent vdev has no peers left */
3098 if (TAILQ_EMPTY(&vdev->peer_list)) {
3099 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003100 * Check if the parent vdev was waiting for its peers
3101 * to be deleted, in order for it to be deleted too.
3102 */
3103 if (vdev->delete.pending) {
3104 ol_txrx_vdev_delete_cb vdev_delete_cb =
3105 vdev->delete.callback;
3106 void *vdev_delete_context =
3107 vdev->delete.context;
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303108 /*
3109 * Now that there are no references to the peer,
3110 * we can release the peer reference lock.
3111 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303112 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303113
gbian016a42e2017-03-01 18:49:11 +08003114 /*
3115			 * ol_tx_desc_free might access invalid vdev contents
3116			 * referred to by a tx desc, since this vdev might be
3117			 * detached asynchronously in another thread.
3118			 *
3119			 * Go through the tx desc pool to set each corresponding
3120			 * tx desc's vdev to NULL when detaching this vdev, and
3121			 * add a vdev check in ol_tx_desc_free to avoid a crash.
3122 *
3123 */
3124 ol_txrx_tx_desc_reset_vdev(vdev);
Mohit Khanna3aee1312016-07-28 19:07:05 -07003125 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003126 "%s: deleting vdev object %p "
3127 "(%02x:%02x:%02x:%02x:%02x:%02x)"
Mohit Khanna47384bc2016-08-15 15:37:05 -07003128 " - its last peer is done",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003129 __func__, vdev,
3130 vdev->mac_addr.raw[0],
3131 vdev->mac_addr.raw[1],
3132 vdev->mac_addr.raw[2],
3133 vdev->mac_addr.raw[3],
3134 vdev->mac_addr.raw[4],
3135 vdev->mac_addr.raw[5]);
3136 /* all peers are gone, go ahead and delete it */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303137 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003138 if (vdev_delete_cb)
3139 vdev_delete_cb(vdev_delete_context);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303140 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303141 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003142 }
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303143 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303144 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05303145 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003146
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303147 ol_txrx_peer_tx_queue_free(pdev, peer);
3148
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003149 /*
3150 * 'array' is allocated in addba handler and is supposed to be
3151 * freed in delba handler. There is the case (for example, in
3152 * SSR) where delba handler is not called. Because array points
3153 * to address of 'base' by default and is reallocated in addba
3154 * handler later, only free the memory when the array does not
3155 * point to base.
3156 */
3157 for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
3158 if (peer->tids_rx_reorder[i].array !=
3159 &peer->tids_rx_reorder[i].base) {
3160 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
3161 "%s, delete reorder arr, tid:%d\n",
3162 __func__, i);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303163 qdf_mem_free(peer->tids_rx_reorder[i].array);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003164 ol_rx_reorder_init(&peer->tids_rx_reorder[i],
3165 (uint8_t) i);
3166 }
3167 }
3168
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303169 qdf_mem_free(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003170 } else {
Mohit Khanna47384bc2016-08-15 15:37:05 -07003171 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
3172 "%s: peer %p peer->ref_cnt = %d", __func__, peer,
3173 qdf_atomic_read(&peer->ref_cnt));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303174 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003175 }
3176}
3177
Dhanashri Atre12a08392016-02-17 13:10:34 -08003178/**
Mohit Khanna0696eef2016-04-14 16:14:08 -07003179 * ol_txrx_clear_peer_internal() - ol internal function to clear peer
3180 * @peer: pointer to ol txrx peer structure
3181 *
3182 * Return: QDF Status
3183 */
3184static QDF_STATUS
3185ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
3186{
3187 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3188 /* Drop pending Rx frames in CDS */
3189 if (sched_ctx)
3190 cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3191
3192 /* Purge the cached rx frame queue */
3193 ol_txrx_flush_rx_frames(peer, 1);
3194
3195 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003196 peer->state = OL_TXRX_PEER_STATE_DISC;
3197 qdf_spin_unlock_bh(&peer->peer_info_lock);
3198
3199 return QDF_STATUS_SUCCESS;
3200}
3201
3202/**
3203 * ol_txrx_clear_peer() - clear peer
3204 * @sta_id: sta id
3205 *
3206 * Return: QDF Status
3207 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003208static QDF_STATUS ol_txrx_clear_peer(struct cdp_pdev *ppdev, uint8_t sta_id)
Mohit Khanna0696eef2016-04-14 16:14:08 -07003209{
3210 struct ol_txrx_peer_t *peer;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003211 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Mohit Khanna0696eef2016-04-14 16:14:08 -07003212
3213 if (!pdev) {
3214 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find pdev!",
3215 __func__);
3216 return QDF_STATUS_E_FAILURE;
3217 }
3218
3219 if (sta_id >= WLAN_MAX_STA_COUNT) {
3220 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid sta id %d", sta_id);
3221 return QDF_STATUS_E_INVAL;
3222 }
3223
3224
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003225 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Mohit Khanna0696eef2016-04-14 16:14:08 -07003226 if (!peer)
3227 return QDF_STATUS_E_FAULT;
3228
3229 return ol_txrx_clear_peer_internal(peer);
3230
3231}
3232
3233/**
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003234 * ol_txrx_peer_detach() - Delete a peer's data object.
3235 * @peer - the object to detach
Dhanashri Atre12a08392016-02-17 13:10:34 -08003236 *
3237 * When the host's control SW disassociates a peer, it calls
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003238 * this function to detach and delete the peer. The reference
Dhanashri Atre12a08392016-02-17 13:10:34 -08003239 * stored in the control peer object to the data peer
3240 * object (set up by a call to ol_peer_store()) is provided.
3241 *
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003242 * Return: None
Dhanashri Atre12a08392016-02-17 13:10:34 -08003243 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003244static void ol_txrx_peer_detach(void *ppeer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003245{
Leo Chang98726762016-10-28 11:07:18 -07003246 ol_txrx_peer_handle peer = ppeer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003247 struct ol_txrx_vdev_t *vdev = peer->vdev;
3248
3249 /* redirect peer's rx delivery function to point to a discard func */
3250 peer->rx_opt_proc = ol_rx_discard;
3251
3252 peer->valid = 0;
3253
Mohit Khanna0696eef2016-04-14 16:14:08 -07003254 /* flush all rx packets before clearing up the peer local_id */
3255 ol_txrx_clear_peer_internal(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003256 ol_txrx_local_peer_id_free(peer->vdev->pdev, peer);
3257
3258 /* debug print to dump rx reorder state */
3259 /* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
3260
yeshwanth sriram guntukae83d8ff2017-02-07 12:18:18 +05303261 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
Houston Hoffman43d47fa2016-02-24 16:34:30 -08003262 "%s:peer %p (%02x:%02x:%02x:%02x:%02x:%02x)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003263 __func__, peer,
3264 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
3265 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
3266 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003267
3268 if (peer->vdev->last_real_peer == peer)
3269 peer->vdev->last_real_peer = NULL;
3270
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303271 qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003272 if (vdev->last_real_peer == peer)
3273 vdev->last_real_peer = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303274 qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003275 htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
3276
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303277 qdf_spinlock_destroy(&peer->peer_info_lock);
3278 qdf_spinlock_destroy(&peer->bufq_lock);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003279 /*
3280 * set delete_in_progress to identify that wma
3281	 * is waiting for the unmap message for this peer
3282 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303283 qdf_atomic_set(&peer->delete_in_progress, 1);
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003284
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003285 /*
3286 * Remove the reference added during peer_attach.
3287 * The peer will still be left allocated until the
3288 * PEER_UNMAP message arrives to remove the other
3289 * reference, added by the PEER_MAP message.
3290 */
3291 ol_txrx_peer_unref_delete(peer);
3292}
3293
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003294/**
3295 * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
3296 * @peer - the object to detach
3297 *
3298 * Detach a peer and force the peer object to be removed. It is called during
3299 * roaming scenario when the firmware has already deleted a peer.
3300 * Peer object is freed immediately to avoid duplicate peers during roam sync
3301 * indication processing.
3302 *
3303 * Return: None
3304 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003305static void ol_txrx_peer_detach_force_delete(void *ppeer)
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003306{
Leo Chang98726762016-10-28 11:07:18 -07003307 ol_txrx_peer_handle peer = ppeer;
Deepak Dhamdheref74d6f82016-09-16 02:47:01 -07003308 ol_txrx_pdev_handle pdev = peer->vdev->pdev;
3309
3310 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s peer %p, peer->ref_cnt %d",
3311 __func__, peer, qdf_atomic_read(&peer->ref_cnt));
3312
3313 /* Clear the peer_id_to_obj map entries */
3314 qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
3315 ol_txrx_peer_remove_obj_map_entries(pdev, peer);
3316 qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
3317
3318 /*
3319 * Set ref_cnt = 1 so that ol_txrx_peer_unref_delete() called by
3320 * ol_txrx_peer_detach() will actually delete this peer entry properly.
3321 */
3322 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
3323 qdf_atomic_set(&peer->ref_cnt, 1);
3324 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3325
3326 ol_txrx_peer_detach(peer);
3327}
3328
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003329ol_txrx_peer_handle
3330ol_txrx_peer_find_by_addr(struct ol_txrx_pdev_t *pdev, uint8_t *peer_mac_addr)
3331{
3332 struct ol_txrx_peer_t *peer;
3333 peer = ol_txrx_peer_find_hash_find(pdev, peer_mac_addr, 0, 0);
3334 if (peer) {
3335 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Houston Hoffman43d47fa2016-02-24 16:34:30 -08003336 "%s: Delete extra reference %p", __func__, peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003337 /* release the extra reference */
3338 ol_txrx_peer_unref_delete(peer);
3339 }
3340 return peer;
3341}
3342
3343/**
3344 * ol_txrx_dump_tx_desc() - dump tx desc total and free count
3345 * @txrx_pdev: Pointer to txrx pdev
3346 *
3347 * Return: none
3348 */
3349static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3350{
3351 struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003352 uint32_t total, num_free;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003353
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303354 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3355 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3356 else
3357 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003358
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003359 num_free = ol_tx_get_total_free_desc(pdev);
3360
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003361 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303362 "total tx credit %d num_free %d",
Houston Hoffman02d1e8e2016-10-13 18:54:52 -07003363 total, num_free);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003364
3365 return;
3366}
3367
3368/**
3369 * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3370 * @timeout: timeout in ms
3371 *
3372 * Wait for tx queue to be empty, return timeout error if
3373 * queue doesn't empty before timeout occurs.
3374 *
3375 * Return:
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303376 * QDF_STATUS_SUCCESS if the queue empties,
3377 * QDF_STATUS_E_TIMEOUT in case of timeout,
3378 * QDF_STATUS_E_FAULT in case of missing handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003379 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003380static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003381{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003382 struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003383
3384 if (txrx_pdev == NULL) {
3385 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
3386 "%s: txrx context is null", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303387 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003388 }
3389
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003390 while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
Anurag Chouhan512c7d52016-02-19 15:49:46 +05303391 qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003392 if (timeout <= 0) {
3393 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303394 "%s: tx frames are pending", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003395 ol_txrx_dump_tx_desc(txrx_pdev);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303396 return QDF_STATUS_E_TIMEOUT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003397 }
3398 timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3399 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303400 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003401}
3402
3403#ifndef QCA_WIFI_3_0_EMU
3404#define SUSPEND_DRAIN_WAIT 500
3405#else
3406#define SUSPEND_DRAIN_WAIT 3000
3407#endif
3408
Yue Ma1e11d792016-02-26 18:58:44 -08003409#ifdef FEATURE_RUNTIME_PM
3410/**
3411 * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
3412 * @txrx_pdev: TXRX pdev context
3413 *
3414 * TXRX is ready to runtime suspend if there are no pending packets
3415 * in the tx queue.
3416 *
3417 * Return: QDF_STATUS
3418 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003419static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003420{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003421 struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07003422
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003423 if (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev))
Yue Ma1e11d792016-02-26 18:58:44 -08003424 return QDF_STATUS_E_BUSY;
3425 else
3426 return QDF_STATUS_SUCCESS;
3427}
3428
3429/**
3430 * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
3431 * @txrx_pdev: TXRX pdev context
3432 *
3433 * This is a dummy function for symmetry.
3434 *
3435 * Return: QDF_STATUS_SUCCESS
3436 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003437static QDF_STATUS ol_txrx_runtime_resume(struct cdp_pdev *ppdev)
Yue Ma1e11d792016-02-26 18:58:44 -08003438{
3439 return QDF_STATUS_SUCCESS;
3440}
3441#endif
3442
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003443/**
3444 * ol_txrx_bus_suspend() - bus suspend
3445 *
3446 * Ensure that ol_txrx is ready for bus suspend
3447 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303448 * Return: QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003449 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003450static QDF_STATUS ol_txrx_bus_suspend(void)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003451{
3452 return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3453}
3454
3455/**
3456 * ol_txrx_bus_resume() - bus resume
3457 *
3458 * Dummy function for symmetry
3459 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303460 * Return: QDF_STATUS_SUCCESS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003461 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003462static QDF_STATUS ol_txrx_bus_resume(void)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003463{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303464 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003465}
3466
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003467/**
3468 * ol_txrx_get_tx_pending - Get the number of pending transmit
3469 * frames that are awaiting completion.
3470 *
3471 * @pdev - the data physical device object
3472 * Mainly used in clean up path to make sure all buffers have been freed
3473 *
3474 * Return: count of pending frames
3475 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003476int ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003477{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003478 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003479 uint32_t total;
3480
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303481 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3482 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3483 else
3484 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003485
Nirav Shah55b45a02016-01-21 10:00:16 +05303486 return total - ol_tx_get_total_free_desc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003487}
3488
3489void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
3490{
3491 ol_tx_desc_list tx_descs;
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303492 /* First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
3493 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003494	 * which is the same as the normal data send completion path */
3495 htt_tx_pending_discard(pdev_handle->htt_pdev);
3496
3497 TAILQ_INIT(&tx_descs);
3498 ol_tx_queue_discard(pdev_handle, true, &tx_descs);
3499 /* Discard Frames in Discard List */
3500 ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
3501
3502 ol_tx_discard_target_frms(pdev_handle);
3503}
3504
3505/*--- debug features --------------------------------------------------------*/
3506
3507unsigned g_txrx_print_level = TXRX_PRINT_LEVEL_ERR; /* default */
3508
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003509#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003510void ol_txrx_print_level_set(unsigned level)
3511{
3512#ifndef TXRX_PRINT_ENABLE
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303513 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003514 "The driver is compiled without TXRX prints enabled.\n"
3515 "To enable them, recompile with TXRX_PRINT_ENABLE defined");
3516#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303517 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003518 "TXRX printout level changed from %d to %d",
3519 g_txrx_print_level, level);
3520 g_txrx_print_level = level;
3521#endif
3522}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003523#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003524
3525struct ol_txrx_stats_req_internal {
3526 struct ol_txrx_stats_req base;
3527 int serviced; /* state of this request */
3528 int offset;
3529};
3530
3531static inline
3532uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
3533{
3534 return (uint64_t) ((size_t) req);
3535}
3536
3537static inline
3538struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
3539{
3540 return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
3541}
3542
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003543#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003544void
3545ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
3546 uint8_t cfg_stats_type, uint32_t cfg_val)
3547{
3548 uint64_t dummy_cookie = 0;
3549 htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
3550 0 /* reset mask */,
3551 cfg_stats_type, cfg_val, dummy_cookie);
3552}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003553#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003554
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003555static A_STATUS
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003556ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
Dhanashri Atre52f71332016-08-22 12:12:36 -07003557 bool per_vdev, bool response_expected)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003558{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003559 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003560 struct ol_txrx_pdev_t *pdev = vdev->pdev;
3561 uint64_t cookie;
3562 struct ol_txrx_stats_req_internal *non_volatile_req;
3563
3564 if (!pdev ||
3565 req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
3566 req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
3567 return A_ERROR;
3568 }
3569
3570 /*
3571 * Allocate a non-transient stats request object.
3572 * (The one provided as an argument is likely allocated on the stack.)
3573 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303574 non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003575 if (!non_volatile_req)
3576 return A_NO_MEMORY;
3577
3578 /* copy the caller's specifications */
3579 non_volatile_req->base = *req;
3580 non_volatile_req->serviced = 0;
3581 non_volatile_req->offset = 0;
3582
3583 /* use the non-volatile request object's address as the cookie */
3584 cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
3585
3586 if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
3587 req->stats_type_upload_mask,
3588 req->stats_type_reset_mask,
3589 HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
3590 cookie)) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303591 qdf_mem_free(non_volatile_req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003592 return A_ERROR;
3593 }
3594
3595 if (req->wait.blocking)
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303596 while (qdf_semaphore_acquire(req->wait.sem_ptr))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003597 ;
3598
Nirav Shahd2310422016-01-21 18:58:06 +05303599 if (response_expected == false)
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303600 qdf_mem_free(non_volatile_req);
Nirav Shahd2310422016-01-21 18:58:06 +05303601
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003602 return A_OK;
3603}
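/*
 * Illustrative usage sketch (compiled out; WLAN_TXRX_USAGE_EXAMPLES is a
 * hypothetical guard): issuing a fire-and-forget firmware stats request.
 * The upload mask is caller-chosen; its bit-to-stats-type mapping is
 * assumed to follow the HTT_DBG_STATS definitions checked above.
 */
#ifdef WLAN_TXRX_USAGE_EXAMPLES
static A_STATUS ol_txrx_example_fw_stats_get(struct cdp_vdev *pvdev,
					     uint32_t upload_mask)
{
	struct ol_txrx_stats_req req;

	qdf_mem_zero(&req, sizeof(req));
	req.stats_type_upload_mask = upload_mask;
	req.stats_type_reset_mask = 0;
	/* print the returned stats via htt_t2h_stats_print() */
	req.print.concise = 1;

	/* non-blocking, no copy buffer, no completion callback expected */
	return ol_txrx_fw_stats_get(pvdev, &req, true, false);
}
#endif /* WLAN_TXRX_USAGE_EXAMPLES */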
Dhanashri Atre12a08392016-02-17 13:10:34 -08003604
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003605void
3606ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
3607 uint64_t cookie, uint8_t *stats_info_list)
3608{
3609 enum htt_dbg_stats_type type;
3610 enum htt_dbg_stats_status status;
3611 int length;
3612 uint8_t *stats_data;
3613 struct ol_txrx_stats_req_internal *req;
3614 int more = 0;
3615
3616 req = ol_txrx_u64_to_stats_ptr(cookie);
3617
3618 do {
3619 htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
3620 &length, &stats_data);
3621 if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
3622 break;
3623 if (status == HTT_DBG_STATS_STATUS_PRESENT ||
3624 status == HTT_DBG_STATS_STATUS_PARTIAL) {
3625 uint8_t *buf;
3626 int bytes = 0;
3627
3628 if (status == HTT_DBG_STATS_STATUS_PARTIAL)
3629 more = 1;
3630 if (req->base.print.verbose || req->base.print.concise)
3631 /* provide the header along with the data */
3632 htt_t2h_stats_print(stats_info_list,
3633 req->base.print.concise);
3634
3635 switch (type) {
3636 case HTT_DBG_STATS_WAL_PDEV_TXRX:
3637 bytes = sizeof(struct wlan_dbg_stats);
3638 if (req->base.copy.buf) {
3639 int lmt;
3640
3641 lmt = sizeof(struct wlan_dbg_stats);
3642 if (req->base.copy.byte_limit < lmt)
3643 lmt = req->base.copy.byte_limit;
3644 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303645 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003646 }
3647 break;
3648 case HTT_DBG_STATS_RX_REORDER:
3649 bytes = sizeof(struct rx_reorder_stats);
3650 if (req->base.copy.buf) {
3651 int lmt;
3652
3653 lmt = sizeof(struct rx_reorder_stats);
3654 if (req->base.copy.byte_limit < lmt)
3655 lmt = req->base.copy.byte_limit;
3656 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303657 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003658 }
3659 break;
3660 case HTT_DBG_STATS_RX_RATE_INFO:
3661 bytes = sizeof(wlan_dbg_rx_rate_info_t);
3662 if (req->base.copy.buf) {
3663 int lmt;
3664
3665 lmt = sizeof(wlan_dbg_rx_rate_info_t);
3666 if (req->base.copy.byte_limit < lmt)
3667 lmt = req->base.copy.byte_limit;
3668 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303669 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003670 }
3671 break;
3672
3673 case HTT_DBG_STATS_TX_RATE_INFO:
3674 bytes = sizeof(wlan_dbg_tx_rate_info_t);
3675 if (req->base.copy.buf) {
3676 int lmt;
3677
3678 lmt = sizeof(wlan_dbg_tx_rate_info_t);
3679 if (req->base.copy.byte_limit < lmt)
3680 lmt = req->base.copy.byte_limit;
3681 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303682 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003683 }
3684 break;
3685
3686 case HTT_DBG_STATS_TX_PPDU_LOG:
3687 bytes = 0;
3688 /* TO DO: specify how many bytes are present */
3689 /* TO DO: add copying to the requestor's buf */
3690
3691 case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
3692 bytes = sizeof(struct rx_remote_buffer_mgmt_stats);
3693 if (req->base.copy.buf) {
3694 int limit;
3695
3696 limit = sizeof(struct rx_remote_buffer_mgmt_stats);
3697 if (req->base.copy.byte_limit < limit) {
3698 limit = req->base.copy.byte_limit;
3699 }
3700 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303701 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003702 }
3703 break;
3704
3705 case HTT_DBG_STATS_TXBF_INFO:
3706 bytes = sizeof(struct wlan_dbg_txbf_data_stats);
3707 if (req->base.copy.buf) {
3708 int limit;
3709
3710 limit = sizeof(struct wlan_dbg_txbf_data_stats);
3711 if (req->base.copy.byte_limit < limit)
3712 limit = req->base.copy.byte_limit;
3713 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303714 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003715 }
3716 break;
3717
3718 case HTT_DBG_STATS_SND_INFO:
3719 bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
3720 if (req->base.copy.buf) {
3721 int limit;
3722
3723 limit = sizeof(struct wlan_dbg_txbf_snd_stats);
3724 if (req->base.copy.byte_limit < limit)
3725 limit = req->base.copy.byte_limit;
3726 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303727 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003728 }
3729 break;
3730
3731 case HTT_DBG_STATS_TX_SELFGEN_INFO:
3732 bytes = sizeof(struct wlan_dbg_tx_selfgen_stats);
3733 if (req->base.copy.buf) {
3734 int limit;
3735
3736 limit = sizeof(struct wlan_dbg_tx_selfgen_stats);
3737 if (req->base.copy.byte_limit < limit)
3738 limit = req->base.copy.byte_limit;
3739 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303740 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003741 }
3742 break;
3743
3744 case HTT_DBG_STATS_ERROR_INFO:
3745 bytes =
3746 sizeof(struct wlan_dbg_wifi2_error_stats);
3747 if (req->base.copy.buf) {
3748 int limit;
3749
3750 limit =
3751 sizeof(struct wlan_dbg_wifi2_error_stats);
3752 if (req->base.copy.byte_limit < limit)
3753 limit = req->base.copy.byte_limit;
3754 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303755 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003756 }
3757 break;
3758
3759 case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
3760 bytes =
3761 sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
3762 if (req->base.copy.buf) {
3763 int limit;
3764
3765 limit = sizeof(struct
3766 rx_txbf_musu_ndpa_pkts_stats);
3767 if (req->base.copy.byte_limit < limit)
3768 limit =
3769 req->base.copy.byte_limit;
3770 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303771 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003772 }
3773 break;
3774
3775 default:
3776 break;
3777 }
3778 buf = req->base.copy.buf
3779 ? req->base.copy.buf
3780 : stats_data;
3781 if (req->base.callback.fp)
3782 req->base.callback.fp(req->base.callback.ctxt,
3783 type, buf, bytes);
3784 }
3785 stats_info_list += length;
3786 } while (1);
3787
3788 if (!more) {
3789 if (req->base.wait.blocking)
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303790 qdf_semaphore_release(req->base.wait.sem_ptr);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303791 qdf_mem_free(req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003792 }
3793}
3794
3795#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
3796int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
3797{
3798 if (debug_specs & TXRX_DBG_MASK_OBJS) {
3799#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
3800 ol_txrx_pdev_display(vdev->pdev, 0);
3801#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303802 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303803 "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003804#endif
3805 }
3806 if (debug_specs & TXRX_DBG_MASK_STATS) {
3807 ol_txrx_stats_display(vdev->pdev);
3808 }
3809 if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
3810#if defined(ENABLE_TXRX_PROT_ANALYZE)
3811 ol_txrx_prot_ans_display(vdev->pdev);
3812#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303813 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303814 "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003815#endif
3816 }
3817 if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
3818#if defined(ENABLE_RX_REORDER_TRACE)
3819 ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
3820#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303821 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303822 "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003823#endif
3824
3825 }
3826 return 0;
3827}
3828#endif
3829
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003830#ifdef currently_unused
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003831int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
3832 int max_subfrms_ampdu, int max_subfrms_amsdu)
3833{
3834 return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
3835 max_subfrms_ampdu, max_subfrms_amsdu);
3836}
Jeff Johnson6f9aa562017-01-17 13:52:18 -08003837#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003838
3839#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
3840void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
3841{
3842 struct ol_txrx_vdev_t *vdev;
3843
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303844 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003845 "%*s%s:\n", indent, " ", "txrx pdev");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303846 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003847 "%*spdev object: %p", indent + 4, " ", pdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303848 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003849 "%*svdev list:", indent + 4, " ");
3850 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303851 ol_txrx_vdev_display(vdev, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003852 }
3853 ol_txrx_peer_find_display(pdev, indent + 4);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303854 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003855 "%*stx desc pool: %d elems @ %p", indent + 4, " ",
3856 pdev->tx_desc.pool_size, pdev->tx_desc.array);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303857 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003858 htt_display(pdev->htt_pdev, indent);
3859}
3860
3861void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
3862{
3863 struct ol_txrx_peer_t *peer;
3864
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303865 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003866 "%*stxrx vdev: %p\n", indent, " ", vdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303867 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003868 "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303869 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003870 "%*sMAC addr: %d:%d:%d:%d:%d:%d",
3871 indent + 4, " ",
3872 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
3873 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
3874 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303875 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003876 "%*speer list:", indent + 4, " ");
3877 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303878 ol_txrx_peer_display(peer, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003879 }
3880}
3881
3882void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
3883{
3884 int i;
3885
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303886 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003887 "%*stxrx peer: %p", indent, " ", peer);
3888 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
3889 if (peer->peer_ids[i] != HTT_INVALID_PEER) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303890 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003891 "%*sID: %d", indent + 4, " ",
3892 peer->peer_ids[i]);
3893 }
3894 }
3895}
3896#endif /* TXRX_DEBUG_LEVEL */
3897
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003898/**
3899 * ol_txrx_stats() - write the vdev's ll pause queue stats into a buffer
3900 * @vdev_id: vdev_id
3901 * @buffer: pointer to buffer
3902 * @buf_len: length of the buffer
3903 *
3904 * Return: length of string
3905 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08003906static int
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003907ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned buf_len)
3908{
3909 uint32_t len = 0;
3910
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003911 struct ol_txrx_vdev_t *vdev =
3912 (struct ol_txrx_vdev_t *)
3913 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003914 if (!vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303915 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303916 "%s: vdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003917 snprintf(buffer, buf_len, "vdev not found");
3918 return len;
3919 }
3920
3921 len = scnprintf(buffer, buf_len,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303922 "\nTXRX stats:\n\nllQueue State : %s\n pause %u unpause %u\n overflow %u\n llQueue timer state : %s\n",
3923 ((vdev->ll_pause.is_q_paused == false) ?
3924 "UNPAUSED" : "PAUSED"),
3925 vdev->ll_pause.q_pause_cnt,
3926 vdev->ll_pause.q_unpause_cnt,
3927 vdev->ll_pause.q_overflow_cnt,
3928 ((vdev->ll_pause.is_q_timer_on == false)
3929 ? "NOT-RUNNING" : "RUNNING"));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003930 return len;
3931}
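/*
 * Illustrative usage (editor's sketch, not part of the original driver):
 * a hypothetical caller formatting the low-latency pause-queue state of a
 * vdev into a local buffer and logging it. The buffer size is an assumption
 * made only for this example.
 *
 *	char stats_buf[256];
 *	uint32_t written;
 *
 *	written = ol_txrx_stats(vdev_id, stats_buf, sizeof(stats_buf));
 *	if (written)
 *		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
 *			  "%s", stats_buf);
 */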
3932
3933void ol_txrx_stats_display(ol_txrx_pdev_handle pdev)
3934{
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303935 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shah6a4eee62016-04-25 10:15:04 +05303936 "TX PATH Statistics:");
3937 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303938 "sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
3939 pdev->stats.pub.tx.from_stack.pkts,
3940 pdev->stats.pub.tx.from_stack.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003941 pdev->stats.pub.tx.dropped.host_reject.pkts,
3942 pdev->stats.pub.tx.dropped.host_reject.bytes,
3943 pdev->stats.pub.tx.dropped.download_fail.pkts
3944 + pdev->stats.pub.tx.dropped.target_discard.pkts
3945 + pdev->stats.pub.tx.dropped.no_ack.pkts,
3946 pdev->stats.pub.tx.dropped.download_fail.bytes
3947 + pdev->stats.pub.tx.dropped.target_discard.bytes
3948 + pdev->stats.pub.tx.dropped.no_ack.bytes);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303949 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303950 "successfully delivered: %lld (%lld B), "
3951 "download fail: %lld (%lld B), "
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003952 "target discard: %lld (%lld B), "
3953 "no ack: %lld (%lld B)",
Nirav Shahda008342016-05-17 18:50:40 +05303954 pdev->stats.pub.tx.delivered.pkts,
3955 pdev->stats.pub.tx.delivered.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003956 pdev->stats.pub.tx.dropped.download_fail.pkts,
3957 pdev->stats.pub.tx.dropped.download_fail.bytes,
3958 pdev->stats.pub.tx.dropped.target_discard.pkts,
3959 pdev->stats.pub.tx.dropped.target_discard.bytes,
3960 pdev->stats.pub.tx.dropped.no_ack.pkts,
3961 pdev->stats.pub.tx.dropped.no_ack.bytes);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303962 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303963 "Tx completions per HTT message:\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003964 "Single Packet %d\n"
3965 " 2-10 Packets %d\n"
3966 "11-20 Packets %d\n"
3967 "21-30 Packets %d\n"
3968 "31-40 Packets %d\n"
3969 "41-50 Packets %d\n"
3970 "51-60 Packets %d\n"
3971 " 60+ Packets %d\n",
3972 pdev->stats.pub.tx.comp_histogram.pkts_1,
3973 pdev->stats.pub.tx.comp_histogram.pkts_2_10,
3974 pdev->stats.pub.tx.comp_histogram.pkts_11_20,
3975 pdev->stats.pub.tx.comp_histogram.pkts_21_30,
3976 pdev->stats.pub.tx.comp_histogram.pkts_31_40,
3977 pdev->stats.pub.tx.comp_histogram.pkts_41_50,
3978 pdev->stats.pub.tx.comp_histogram.pkts_51_60,
3979 pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
Nirav Shahda008342016-05-17 18:50:40 +05303980
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303981 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shah6a4eee62016-04-25 10:15:04 +05303982 "RX PATH Statistics:");
3983 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3984 "%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
Nirav Shahda008342016-05-17 18:50:40 +05303985 "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
3986 "msdus with frag_ind: %d msdus with offload_ind: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003987 pdev->stats.priv.rx.normal.ppdus,
3988 pdev->stats.priv.rx.normal.mpdus,
3989 pdev->stats.pub.rx.delivered.pkts,
3990 pdev->stats.pub.rx.delivered.bytes,
Nirav Shah6a4eee62016-04-25 10:15:04 +05303991 pdev->stats.pub.rx.dropped_err.pkts,
3992 pdev->stats.pub.rx.dropped_err.bytes,
3993 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
3994 pdev->stats.pub.rx.dropped_peer_invalid.bytes,
3995 pdev->stats.pub.rx.dropped_mic_err.pkts,
Nirav Shahda008342016-05-17 18:50:40 +05303996 pdev->stats.pub.rx.dropped_mic_err.bytes,
3997 pdev->stats.pub.rx.msdus_with_frag_ind,
3998 pdev->stats.pub.rx.msdus_with_offload_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003999
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304000 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004001 " fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
4002 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4003 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4004 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304005
4006 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05304007 "Rx packets per HTT message:\n"
Nirav Shah6a4eee62016-04-25 10:15:04 +05304008 "Single Packet %d\n"
4009 " 2-10 Packets %d\n"
4010 "11-20 Packets %d\n"
4011 "21-30 Packets %d\n"
4012 "31-40 Packets %d\n"
4013 "41-50 Packets %d\n"
4014 "51-60 Packets %d\n"
4015 " 60+ Packets %d\n",
4016 pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4017 pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4018 pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4019 pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4020 pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4021 pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4022 pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4023 pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004024}
4025
4026void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4027{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304028 qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004029}
4030
4031#if defined(ENABLE_TXRX_PROT_ANALYZE)
4032
4033void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4034{
4035 ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4036 ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4037}
4038
4039#endif /* ENABLE_TXRX_PROT_ANALYZE */
4040
4041#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
4042int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4043{
4044 return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4045 OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4046}
4047#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4048
4049#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
4050A_STATUS
4051ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4052 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4053{
Anurag Chouhanc5548422016-02-24 18:33:27 +05304054 qdf_assert(pdev && peer && stats);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304055 qdf_spin_lock_bh(&pdev->peer_stat_mutex);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304056 qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304057 qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004058 return A_OK;
4059}
4060#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4061
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004062static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *pvdev, bool val)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004063{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004064 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004065 if (NULL == vdev)
4066 return;
4067
4068 vdev->disable_intrabss_fwd = val;
4069}
4070
Nirav Shahc657ef52016-07-26 14:22:38 +05304071/**
4072 * ol_txrx_update_mac_id() - update mac_id for vdev
4073 * @vdev_id: vdev id
4074 * @mac_id: mac id
4075 *
4076 * Return: none
4077 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004078static void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id)
Nirav Shahc657ef52016-07-26 14:22:38 +05304079{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004080 struct ol_txrx_vdev_t *vdev =
4081 (struct ol_txrx_vdev_t *)
4082 ol_txrx_get_vdev_from_vdev_id(vdev_id);
Nirav Shahc657ef52016-07-26 14:22:38 +05304083
4084 if (NULL == vdev) {
4085 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4086 "%s: Invalid vdev_id %d", __func__, vdev_id);
4087 return;
4088 }
4089 vdev->mac_id = mac_id;
4090}
4091
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004092#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
4093
4094/**
4095 * ol_txrx_get_vdev_from_sta_id() - get vdev from sta_id
4096 * @sta_id: sta_id
4097 *
4098 * Return: vdev handle
4099 * NULL if not found.
4100 */
4101static ol_txrx_vdev_handle ol_txrx_get_vdev_from_sta_id(uint8_t sta_id)
4102{
4103 struct ol_txrx_peer_t *peer = NULL;
4104 ol_txrx_pdev_handle pdev = NULL;
4105
4106 if (sta_id >= WLAN_MAX_STA_COUNT) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304107 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304108 "Invalid sta id passed");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004109 return NULL;
4110 }
4111
Anurag Chouhan6d760662016-02-20 16:05:43 +05304112 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004113 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304114 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304115 "PDEV not found for sta_id [%d]", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004116 return NULL;
4117 }
4118
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004119 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004120
4121 if (!peer) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304122 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304123 "PEER [%d] not found", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004124 return NULL;
4125 }
4126
4127 return peer->vdev;
4128}
4129
4130/**
4131 * ol_txrx_register_tx_flow_control() - register tx flow control callback
4132 * @vdev_id: vdev_id
4133 * @flowControl: flow control callback
4134 * @osif_fc_ctx: callback context
4135 *
4136 * Return: 0 for success or error code
4137 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004138static int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304139 ol_txrx_tx_flow_control_fp flowControl,
4140 void *osif_fc_ctx)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004141{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004142 struct ol_txrx_vdev_t *vdev =
4143 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004144 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304145 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304146 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004147 return -EINVAL;
4148 }
4149
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304150 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004151 vdev->osif_flow_control_cb = flowControl;
4152 vdev->osif_fc_ctx = osif_fc_ctx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304153 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004154 return 0;
4155}
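/*
 * Illustrative usage (editor's sketch, not part of the original driver):
 * a hypothetical OS-interface layer registering its flow-control callback
 * for a vdev and deregistering it on teardown. hdd_tx_resume_cb and
 * hdd_adapter are assumed names used only for this example.
 *
 *	if (ol_txrx_register_tx_flow_control(vdev_id, hdd_tx_resume_cb,
 *					     hdd_adapter))
 *		return -EINVAL;
 *	...
 *	ol_txrx_deregister_tx_flow_control_cb(vdev_id);
 */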
4156
4157/**
4158 * ol_txrx_deregister_tx_flow_control_cb() - deregister tx flow control callback
4159 * @vdev_id: vdev_id
4160 *
4161 * Return: 0 for success or error code
4162 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004163static int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004164{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004165 struct ol_txrx_vdev_t *vdev =
4166 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004167 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304168 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304169 "%s: Invalid vdev_id", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004170 return -EINVAL;
4171 }
4172
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304173 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004174 vdev->osif_flow_control_cb = NULL;
4175 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304176 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004177 return 0;
4178}
4179
4180/**
4181 * ol_txrx_get_tx_resource() - check whether free tx descriptors are above the low watermark
4182 * @sta_id: sta id
4183 * @low_watermark: low watermark
4184 * @high_watermark_offset: high watermark offset value
4185 *
4186 * Return: true/false
4187 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004188static bool
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004189ol_txrx_get_tx_resource(uint8_t sta_id,
4190 unsigned int low_watermark,
4191 unsigned int high_watermark_offset)
4192{
4193 ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
4194 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304195 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304196 "%s: Invalid sta_id %d", __func__, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004197 /* Return true so the caller does not treat this as a
4198 * shortage of tx resources (i.e. below low_watermark).
4199 * sta_id validation will be done in ol_tx_send_data_frame,
4200 * and if the sta_id is not registered then the host will
4201 * drop the packet.
4202 */
4203 return true;
4204 }
4205
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304206 qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304207
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004208 if (vdev->pdev->tx_desc.num_free < (uint16_t) low_watermark) {
4209 vdev->tx_fl_lwm = (uint16_t) low_watermark;
4210 vdev->tx_fl_hwm =
4211 (uint16_t) (low_watermark + high_watermark_offset);
4212 /* Not enough free resource, stop TX OS Q */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05304213 qdf_atomic_set(&vdev->os_q_paused, 1);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304214 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004215 return false;
4216 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304217 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004218 return true;
4219}
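/*
 * Illustrative usage (editor's sketch, not part of the original driver):
 * a hypothetical data-path caller checking descriptor availability before
 * queuing a frame. The watermark values are assumptions for this example;
 * when fewer than 100 descriptors are free the OS queue for the vdev is
 * paused and the caller should hold the frame until the flow-control
 * callback signals resume.
 *
 *	if (!ol_txrx_get_tx_resource(sta_id, 100, 50))
 *		return QDF_STATUS_E_RESOURCES;
 */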
4220
4221/**
4222 * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
4223 * @vdev_id: vdev id
4224 * @pause_q_depth: pause queue depth
4225 *
4226 * Return: 0 for success or error code
4227 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004228static int
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004229ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
4230{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004231 struct ol_txrx_vdev_t *vdev =
4232 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004233 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304234 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304235 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004236 return -EINVAL;
4237 }
4238
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304239 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004240 vdev->ll_pause.max_q_depth = pause_q_depth;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304241 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004242
4243 return 0;
4244}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004245#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
4246
4247#ifdef IPA_OFFLOAD
Leo Chang8e073612015-11-13 10:55:34 -08004248/**
4249 * ol_txrx_ipa_uc_get_resource() - Client request resource information
4250 * @ppdev: handle to the txrx pdev
 * @ipa_res: IPA resource info structure to be filled; its fields are:
4251 * @ce_sr_base_paddr: copy engine source ring base physical address
4252 * @ce_sr_ring_size: copy engine source ring size
4253 * @ce_reg_paddr: copy engine register physical address
4254 * @tx_comp_ring_base_paddr: tx comp ring base physical address
4255 * @tx_comp_ring_size: tx comp ring size
4256 * @tx_num_alloc_buffer: number of allocated tx buffer
4257 * @rx_rdy_ring_base_paddr: rx ready ring base physical address
4258 * @rx_rdy_ring_size: rx ready ring size
4259 * @rx_proc_done_idx_paddr: rx process done index physical address
4260 * @rx_proc_done_idx_vaddr: rx process done index virtual address
4261 * @rx2_rdy_ring_base_paddr: rx done ring base physical address
4262 * @rx2_rdy_ring_size: rx done ring size
4263 * @rx2_proc_done_idx_paddr: rx done index physical address
4264 * @rx2_proc_done_idx_vaddr: rx done index virtual address
4265 *
4266 * OL client will request IPA UC related resource information.
4267 * Resource information will be distributed to the IPA module.
4268 * All of the required resources should be pre-allocated
4269 *
4270 * Return: none
4271 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004272static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004273ol_txrx_ipa_uc_get_resource(struct cdp_pdev *ppdev,
Leo Chang98726762016-10-28 11:07:18 -07004274 struct ol_txrx_ipa_resources *ipa_res)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004275{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004276 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07004277
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004278 htt_ipa_uc_get_resource(pdev->htt_pdev,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004279 &ipa_res->ce_sr_base_paddr,
4280 &ipa_res->ce_sr_ring_size,
4281 &ipa_res->ce_reg_paddr,
4282 &ipa_res->tx_comp_ring_base_paddr,
4283 &ipa_res->tx_comp_ring_size,
4284 &ipa_res->tx_num_alloc_buffer,
4285 &ipa_res->rx_rdy_ring_base_paddr,
4286 &ipa_res->rx_rdy_ring_size,
4287 &ipa_res->rx_proc_done_idx_paddr,
4288 &ipa_res->rx_proc_done_idx_vaddr,
4289 &ipa_res->rx2_rdy_ring_base_paddr,
4290 &ipa_res->rx2_rdy_ring_size,
4291 &ipa_res->rx2_proc_done_idx_paddr,
4292 &ipa_res->rx2_proc_done_idx_vaddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004293}
4294
Leo Chang8e073612015-11-13 10:55:34 -08004295/**
4296 * ol_txrx_ipa_uc_set_doorbell_paddr() - Client set IPA UC doorbell register
4297 * @ppdev: handle to the txrx pdev
4298 * @ipa_tx_uc_doorbell_paddr: tx comp doorbell physical address
4299 * @ipa_rx_uc_doorbell_paddr: rx ready doorbell physical address
4300 *
4301 * IPA UC informs the driver of its doorbell register physical addresses;
4302 * WLAN firmware will use these physical addresses to notify IPA UC.
4303 *
4304 * Return: none
4305 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004306static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004307ol_txrx_ipa_uc_set_doorbell_paddr(struct cdp_pdev *ppdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05304308 qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
4309 qdf_dma_addr_t ipa_rx_uc_doorbell_paddr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004310{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004311 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004312 htt_ipa_uc_set_doorbell_paddr(pdev->htt_pdev,
4313 ipa_tx_uc_doorbell_paddr,
4314 ipa_rx_uc_doorbell_paddr);
4315}
4316
Leo Chang8e073612015-11-13 10:55:34 -08004317/**
4318 * ol_txrx_ipa_uc_set_active() - Client notify IPA UC data path active or not
4319 * @ppdev: handle to the txrx pdev
4320 * @uc_active: whether the IPA UC data path is active
4321 * @is_tx: true for the tx direction, false for the rx direction
4322 *
4323 * Notify the WLAN firmware whether the IPA UC data path is active for the
4324 * given direction.
4325 *
4326 * Return: none
4327 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004328static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004329ol_txrx_ipa_uc_set_active(struct cdp_pdev *ppdev, bool uc_active, bool is_tx)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004330{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004331 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004332 htt_h2t_ipa_uc_set_active(pdev->htt_pdev, uc_active, is_tx);
4333}
4334
4335/**
Leo Chang8e073612015-11-13 10:55:34 -08004336 * ol_txrx_ipa_uc_op_response() - Handle OP command response from firmware
4337 * @pdev: handle to the HTT instance
4338 * @op_msg: op response message from firmware
4339 *
4340 * Return: none
4341 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004342static void ol_txrx_ipa_uc_op_response(struct cdp_pdev *ppdev, uint8_t *op_msg)
Govind Singh66615292015-12-28 23:07:54 +05304343{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004344 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Govind Singh66615292015-12-28 23:07:54 +05304345 if (pdev->ipa_uc_op_cb) {
4346 pdev->ipa_uc_op_cb(op_msg, pdev->osif_dev);
4347 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304348 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Govind Singh66615292015-12-28 23:07:54 +05304349 "%s: IPA callback function is not registered", __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304350 qdf_mem_free(op_msg);
Govind Singh66615292015-12-28 23:07:54 +05304351 return;
4352 }
4353}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004354
Leo Chang8e073612015-11-13 10:55:34 -08004355/**
4356 * ol_txrx_ipa_uc_register_op_cb() - Register OP handler function
4357 * @pdev: handle to the HTT instance
4358 * @op_cb: handler function pointer
4359 * @osif_dev: register client context
4360 *
4361 * Return: none
4362 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004363static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004364ol_txrx_ipa_uc_register_op_cb(struct cdp_pdev *ppdev,
4365 ipa_uc_op_cb_type op_cb, void *osif_dev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004366{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004367 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004368 pdev->ipa_uc_op_cb = op_cb;
4369 pdev->osif_dev = osif_dev;
4370}
4371
Leo Chang8e073612015-11-13 10:55:34 -08004372/**
4373 * ol_txrx_ipa_uc_get_stat() - Get firmware wdi status
4374 * @pdev: handle to the HTT instance
4375 *
4376 * Return: none
4377 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004378static void ol_txrx_ipa_uc_get_stat(struct cdp_pdev *ppdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004379{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004380 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004381 htt_h2t_ipa_uc_get_stats(pdev->htt_pdev);
4382}
4383#endif /* IPA_OFFLOAD */
4384
Nirav Shahda008342016-05-17 18:50:40 +05304385/**
4386 * ol_txrx_display_stats_help() - print statistics help
4387 *
4388 * Return: none
4389 */
4390static void ol_txrx_display_stats_help(void)
4391{
4392 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4393 "iwpriv wlan0 dumpStats [option] - dump statistics");
4394 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4395 "iwpriv wlan0 clearStats [option] - clear statistics");
4396 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4397 "options:");
4398 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4399 " 1 -- TXRX Layer statistics");
4400 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4401 " 2 -- Bandwidth compute timer stats");
4402 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4403 " 3 -- TSO statistics");
4404 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4405 " 4 -- Network queue statistics");
4406 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4407 " 5 -- Flow control statistics");
4408 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4409 " 6 -- Per Layer statistics");
4410 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4411 " 7 -- Copy engine interrupt statistics");
4412
4413}
4414
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004415static void ol_txrx_display_stats(uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004416{
4417 ol_txrx_pdev_handle pdev;
4418
Anurag Chouhan6d760662016-02-20 16:05:43 +05304419 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004420 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304421 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304422 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004423 return;
4424 }
4425
4426 switch (value) {
4427 case WLAN_TXRX_STATS:
4428 ol_txrx_stats_display(pdev);
4429 break;
4430 case WLAN_TXRX_TSO_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004431 ol_txrx_stats_display_tso(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004432 break;
4433 case WLAN_DUMP_TX_FLOW_POOL_INFO:
4434 ol_tx_dump_flow_pool_info();
4435 break;
4436 case WLAN_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304437 qdf_nbuf_tx_desc_count_display();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004438 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304439#ifdef CONFIG_HL_SUPPORT
4440 case WLAN_SCHEDULER_STATS:
4441 ol_tx_sched_cur_state_display(pdev);
4442 ol_tx_sched_stats_display(pdev);
4443 break;
4444 case WLAN_TX_QUEUE_STATS:
4445 ol_tx_queue_log_display(pdev);
4446 break;
4447#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
4448 case WLAN_CREDIT_STATS:
4449 ol_tx_dump_group_credit_stats(pdev);
4450 break;
4451#endif
4452
4453#ifdef DEBUG_HL_LOGGING
4454 case WLAN_BUNDLE_STATS:
4455 htt_dump_bundle_stats(pdev->htt_pdev);
4456 break;
4457#endif
4458#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004459 default:
Nirav Shahda008342016-05-17 18:50:40 +05304460 ol_txrx_display_stats_help();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004461 break;
4462 }
4463}
4464
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004465static void ol_txrx_clear_stats(uint16_t value)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004466{
4467 ol_txrx_pdev_handle pdev;
4468
Anurag Chouhan6d760662016-02-20 16:05:43 +05304469 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004470 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304471 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304472 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004473 return;
4474 }
4475
4476 switch (value) {
4477 case WLAN_TXRX_STATS:
4478 ol_txrx_stats_clear(pdev);
4479 break;
4480 case WLAN_DUMP_TX_FLOW_POOL_INFO:
4481 ol_tx_clear_flow_pool_stats();
4482 break;
4483 case WLAN_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304484 qdf_nbuf_tx_desc_count_clear();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004485 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304486#ifdef CONFIG_HL_SUPPORT
4487 case WLAN_SCHEDULER_STATS:
4488 ol_tx_sched_stats_clear(pdev);
4489 break;
4490 case WLAN_TX_QUEUE_STATS:
4491 ol_tx_queue_log_clear(pdev);
4492 break;
4493#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
4494 case WLAN_CREDIT_STATS:
4495 ol_tx_clear_group_credit_stats(pdev);
4496 break;
4497#endif
4498 case WLAN_BUNDLE_STATS:
4499 htt_clear_bundle_stats(pdev->htt_pdev);
4500 break;
4501#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004502 default:
Nirav Shahda008342016-05-17 18:50:40 +05304503 ol_txrx_display_stats_help();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004504 break;
4505 }
4506}
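/*
 * Illustrative usage (editor's sketch, not part of the original driver):
 * dumping and then clearing the TXRX-layer statistics, mirroring what the
 * "iwpriv wlan0 dumpStats 1" and "clearStats 1" paths described in
 * ol_txrx_display_stats_help() do.
 *
 *	ol_txrx_display_stats(WLAN_TXRX_STATS);
 *	ol_txrx_clear_stats(WLAN_TXRX_STATS);
 */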
4507
4508/**
4509 * ol_rx_data_cb() - data rx callback
4510 * @pdev: txrx pdev handle
4511 * @buf_list: buffer list
Nirav Shah36a87bf2016-02-22 12:38:46 +05304512 * @staid: Station id
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004513 *
4514 * Return: None
4515 */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304516static void ol_rx_data_cb(struct ol_txrx_pdev_t *pdev,
4517 qdf_nbuf_t buf_list, uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004518{
Nirav Shah36a87bf2016-02-22 12:38:46 +05304519 void *cds_ctx = cds_get_global_context();
Mohit Khanna0696eef2016-04-14 16:14:08 -07004520 void *osif_dev;
Nirav Shahcbc6d722016-03-01 16:24:53 +05304521 qdf_nbuf_t buf, next_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304522 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004523 ol_txrx_rx_fp data_rx = NULL;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304524 struct ol_txrx_peer_t *peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004525
Nirav Shah36a87bf2016-02-22 12:38:46 +05304526 if (qdf_unlikely(!cds_ctx) || qdf_unlikely(!pdev))
4527 goto free_buf;
4528
4529 /* Do not use peer directly. Derive peer from staid to
4530 * make sure that peer is valid.
4531 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004532 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, staid);
Nirav Shah36a87bf2016-02-22 12:38:46 +05304533 if (!peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004534 goto free_buf;
4535
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304536 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre50141c52016-04-07 13:15:29 -07004537 if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
4538 !peer->vdev->rx)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304539 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004540 goto free_buf;
4541 }
Dhanashri Atre182b0272016-02-17 15:35:07 -08004542
4543 data_rx = peer->vdev->rx;
Mohit Khanna0696eef2016-04-14 16:14:08 -07004544 osif_dev = peer->vdev->osif_dev;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304545 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004546
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304547 qdf_spin_lock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004548 if (!list_empty(&peer->cached_bufq)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304549 qdf_spin_unlock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004550 /* Flush the cached frames to HDD before passing new rx frame */
4551 ol_txrx_flush_rx_frames(peer, 0);
4552 } else
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304553 qdf_spin_unlock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004554
4555 buf = buf_list;
4556 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304557 next_buf = qdf_nbuf_queue_next(buf);
4558 qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
Mohit Khanna0696eef2016-04-14 16:14:08 -07004559 ret = data_rx(osif_dev, buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304560 if (ret != QDF_STATUS_SUCCESS) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004561 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Frame Rx to HDD failed");
Nirav Shah6a4eee62016-04-25 10:15:04 +05304562 if (pdev)
4563 TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304564 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004565 }
4566 buf = next_buf;
4567 }
4568 return;
4569
4570free_buf:
4571 TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "%s:Dropping frames", __func__);
4572 buf = buf_list;
4573 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304574 next_buf = qdf_nbuf_queue_next(buf);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304575 if (pdev)
4576 TXRX_STATS_MSDU_INCR(pdev,
4577 rx.dropped_peer_invalid, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304578 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004579 buf = next_buf;
4580 }
4581}
4582
4583/**
4584 * ol_rx_data_process() - process rx frame
4585 * @peer: peer
4586 * @rx_buf_list: rx buffer list
4587 *
4588 * Return: None
4589 */
4590void ol_rx_data_process(struct ol_txrx_peer_t *peer,
Nirav Shahcbc6d722016-03-01 16:24:53 +05304591 qdf_nbuf_t rx_buf_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004592{
4593 /* The firmware data path active response will use the shim RX thread;
4594 * the T2H message runs in SIRQ context, and IPA kernel module APIs
4595 * should not be called from SIRQ context. */
Nirav Shahcbc6d722016-03-01 16:24:53 +05304596 qdf_nbuf_t buf, next_buf;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004597 ol_txrx_rx_fp data_rx = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304598 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004599
4600 if ((!peer) || (!pdev)) {
4601 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "peer/pdev is NULL");
4602 goto drop_rx_buf;
4603 }
4604
Dhanashri Atre182b0272016-02-17 15:35:07 -08004605 qdf_assert(peer->vdev);
4606
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304607 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004608 if (peer->state >= OL_TXRX_PEER_STATE_CONN)
Dhanashri Atre182b0272016-02-17 15:35:07 -08004609 data_rx = peer->vdev->rx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304610 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004611
4612 /*
4613 * If there is a data frame from peer before the peer is
4614 * registered for data service, enqueue them on to pending queue
4615 * which will be flushed to HDD once that station is registered.
4616 */
4617 if (!data_rx) {
4618 struct ol_rx_cached_buf *cache_buf;
Manjunathappa Prakash92db7712016-05-27 00:19:34 -07004619
4620 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
4621 "Data on the peer before it is registered!!!");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004622 buf = rx_buf_list;
4623 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304624 next_buf = qdf_nbuf_queue_next(buf);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304625 cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004626 if (!cache_buf) {
4627 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
4628 "Failed to allocate buf to cache the rx frames");
Nirav Shahcbc6d722016-03-01 16:24:53 +05304629 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004630 } else {
4631 /* Add NULL terminator */
Nirav Shahcbc6d722016-03-01 16:24:53 +05304632 qdf_nbuf_set_next(buf, NULL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004633 cache_buf->buf = buf;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304634 qdf_spin_lock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004635 list_add_tail(&cache_buf->list,
4636 &peer->cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304637 qdf_spin_unlock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004638 }
4639 buf = next_buf;
4640 }
4641 } else {
4642#ifdef QCA_CONFIG_SMP
4643 /*
4644 * If the kernel is SMP, schedule rx thread to
4645 * better use multicores.
4646 */
4647 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
Nirav Shah36a87bf2016-02-22 12:38:46 +05304648 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004649 } else {
4650 p_cds_sched_context sched_ctx =
4651 get_cds_sched_ctxt();
4652 struct cds_ol_rx_pkt *pkt;
4653
4654 if (unlikely(!sched_ctx))
4655 goto drop_rx_buf;
4656
4657 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
4658 if (!pkt) {
4659 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304660 "No available Rx message buffer");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004661 goto drop_rx_buf;
4662 }
4663 pkt->callback = (cds_ol_rx_thread_cb)
4664 ol_rx_data_cb;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304665 pkt->context = (void *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004666 pkt->Rxpkt = (void *)rx_buf_list;
4667 pkt->staId = peer->local_id;
4668 cds_indicate_rxpkt(sched_ctx, pkt);
4669 }
4670#else /* QCA_CONFIG_SMP */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304671 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004672#endif /* QCA_CONFIG_SMP */
4673 }
4674
4675 return;
4676
4677drop_rx_buf:
4678 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Dropping rx packets");
4679 buf = rx_buf_list;
4680 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304681 next_buf = qdf_nbuf_queue_next(buf);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304682 if (pdev)
4683 TXRX_STATS_MSDU_INCR(pdev,
4684 rx.dropped_peer_invalid, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304685 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004686 buf = next_buf;
4687 }
4688}
4689
4690/**
4691 * ol_txrx_register_peer() - register peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004692 * @sta_desc: sta descriptor
4693 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05304694 * Return: QDF Status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004695 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004696static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004697{
4698 struct ol_txrx_peer_t *peer;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304699 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004700 union ol_txrx_peer_update_param_t param;
4701 struct privacy_exemption privacy_filter;
4702
4703 if (!pdev) {
4704 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Pdev is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304705 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004706 }
4707
4708 if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
4709 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid sta id :%d",
4710 sta_desc->sta_id);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304711 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004712 }
4713
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004714 peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev,
4715 sta_desc->sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004716 if (!peer)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304717 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004718
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304719 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004720 peer->state = OL_TXRX_PEER_STATE_CONN;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304721 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004722
4723 param.qos_capable = sta_desc->is_qos_enabled;
4724 ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
4725 ol_txrx_peer_update_qos_capable);
4726
4727 if (sta_desc->is_wapi_supported) {
4728 /*Privacy filter to accept unencrypted WAI frames */
4729 privacy_filter.ether_type = ETHERTYPE_WAI;
4730 privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
4731 privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
4732 ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
4733 }
4734
4735 ol_txrx_flush_rx_frames(peer, 0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304736 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004737}
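/*
 * Illustrative usage (editor's sketch, not part of the original driver):
 * a hypothetical OS-interface layer registering a station for the data path
 * once association completes. The sta_id, QoS and WAPI values are
 * assumptions used only for this example.
 *
 *	struct ol_txrx_desc_type sta_desc = { 0 };
 *
 *	sta_desc.sta_id = sta_id;
 *	sta_desc.is_qos_enabled = 1;
 *	sta_desc.is_wapi_supported = 0;
 *	if (ol_txrx_register_peer(&sta_desc) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 */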
4738
4739/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004740 * ol_txrx_register_ocb_peer - Function to register the OCB peer
4741 * @cds_ctx: Pointer to the global OS context
4742 * @mac_addr: MAC address of the self peer
4743 * @peer_id: Pointer to the peer ID
4744 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304745 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004746 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004747static QDF_STATUS ol_txrx_register_ocb_peer(void *cds_ctx, uint8_t *mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004748 uint8_t *peer_id)
4749{
4750 ol_txrx_pdev_handle pdev;
4751 ol_txrx_peer_handle peer;
4752
4753 if (!cds_ctx) {
4754 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Invalid context",
4755 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304756 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004757 }
4758
Anurag Chouhan6d760662016-02-20 16:05:43 +05304759 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004760 if (!pdev) {
4761 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find pdev!",
4762 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304763 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004764 }
4765
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004766 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
4767 mac_addr, peer_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004768 if (!peer) {
4769 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find OCB peer!",
4770 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304771 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004772 }
4773
4774 ol_txrx_set_ocb_peer(pdev, peer);
4775
4776 /* Set peer state to connected */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08004777 ol_txrx_peer_state_update((struct cdp_pdev *)pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004778 OL_TXRX_PEER_STATE_AUTH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004779
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304780 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004781}
4782
4783/**
4784 * ol_txrx_set_ocb_peer - Function to store the OCB peer
4785 * @pdev: Handle to the HTT instance
4786 * @peer: Pointer to the peer
4787 */
4788void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
4789 struct ol_txrx_peer_t *peer)
4790{
4791 if (pdev == NULL)
4792 return;
4793
4794 pdev->ocb_peer = peer;
4795 pdev->ocb_peer_valid = (NULL != peer);
4796}
4797
4798/**
4799 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
4800 * @pdev: Handle to the HTT instance
4801 * @peer: Pointer to the returned peer
4802 *
4803 * Return: true if the peer is valid, false if not
4804 */
4805bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
4806 struct ol_txrx_peer_t **peer)
4807{
4808 int rc;
4809
4810 if ((pdev == NULL) || (peer == NULL)) {
4811 rc = false;
4812 goto exit;
4813 }
4814
4815 if (pdev->ocb_peer_valid) {
4816 *peer = pdev->ocb_peer;
4817 rc = true;
4818 } else {
4819 rc = false;
4820 }
4821
4822exit:
4823 return rc;
4824}
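/*
 * Illustrative usage (editor's sketch, not part of the original driver):
 * retrieving the stored OCB self peer before handling an OCB frame.
 *
 *	struct ol_txrx_peer_t *ocb_peer;
 *
 *	if (!ol_txrx_get_ocb_peer(pdev, &ocb_peer))
 *		return QDF_STATUS_E_FAILURE;
 */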
4825
4826#ifdef QCA_LL_TX_FLOW_CONTROL_V2
4827/**
4828 * ol_txrx_register_pause_cb() - register pause callback
4829 * @pause_cb: pause callback
4830 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05304831 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004832 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004833static QDF_STATUS ol_txrx_register_pause_cb(ol_tx_pause_callback_fp pause_cb)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004834{
Anurag Chouhan6d760662016-02-20 16:05:43 +05304835 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004836 if (!pdev || !pause_cb) {
4837 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "pdev or pause_cb is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304838 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004839 }
4840 pdev->pause_cb = pause_cb;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304841 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004842}
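/*
 * Illustrative usage (editor's sketch, not part of the original driver):
 * a hypothetical OS-interface layer registering its pause/unpause handler
 * with the flow-control V2 framework. hdd_tx_pause_cb is an assumed
 * callback name used only for this example.
 *
 *	if (ol_txrx_register_pause_cb(hdd_tx_pause_cb) != QDF_STATUS_SUCCESS)
 *		return -EINVAL;
 */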
4843#endif
4844
4845#if defined(FEATURE_LRO)
Dhanashri Atre8d978172015-10-30 15:12:03 -07004846/**
4847 * ol_txrx_lro_flush_handler() - LRO flush handler
4848 * @context: dev handle
4849 * @rxpkt: rx data
4850 * @staid: station id
4851 *
4852 * This function handles an LRO flush indication.
4853 * If the rx thread is enabled, it will be invoked by the rx
4854 * thread else it will be called in the tasklet context
4855 *
4856 * Return: none
4857 */
Jeff Johnsonf89f58f2016-10-14 09:58:29 -07004858static void ol_txrx_lro_flush_handler(void *context,
4859 void *rxpkt,
4860 uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004861{
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004862 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004863
Anurag Chouhanc5548422016-02-24 18:33:27 +05304864 if (qdf_unlikely(!pdev)) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304865 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304866 "%s: Invalid context", __func__);
Anurag Chouhanc5548422016-02-24 18:33:27 +05304867 qdf_assert(0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004868 return;
4869 }
4870
4871 if (pdev->lro_info.lro_flush_cb)
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004872 pdev->lro_info.lro_flush_cb(context);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004873 else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304874 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304875 "%s: lro_flush_cb NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004876}
4877
Dhanashri Atre8d978172015-10-30 15:12:03 -07004878/**
4879 * ol_txrx_lro_flush() - LRO flush callback
4880 * @data: opaque data pointer
4881 *
4882 * This is the callback registered with CE to trigger
4883 * an LRO flush
4884 *
4885 * Return: none
4886 */
Jeff Johnsonf89f58f2016-10-14 09:58:29 -07004887static void ol_txrx_lro_flush(void *data)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004888{
4889 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
4890 struct cds_ol_rx_pkt *pkt;
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004891 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004892
Anurag Chouhanc5548422016-02-24 18:33:27 +05304893 if (qdf_unlikely(!sched_ctx))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004894 return;
4895
4896 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004897 ol_txrx_lro_flush_handler(data, NULL, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004898 } else {
4899 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
Anurag Chouhanc5548422016-02-24 18:33:27 +05304900 if (qdf_unlikely(!pkt)) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304901 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304902 "%s: Not able to allocate context", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004903 return;
4904 }
4905
Dhanashri Atre8d978172015-10-30 15:12:03 -07004906 pkt->callback =
4907 (cds_ol_rx_thread_cb) ol_txrx_lro_flush_handler;
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004908 pkt->context = data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004909 pkt->Rxpkt = NULL;
4910 pkt->staId = 0;
4911 cds_indicate_rxpkt(sched_ctx, pkt);
4912 }
4913}
4914
Dhanashri Atre8d978172015-10-30 15:12:03 -07004915/**
4916 * ol_register_lro_flush_cb() - register the LRO flush callback
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004917 * @lro_flush_cb: flush callback function
4918 * @lro_init_cb: Allocate and initialize LRO data structure.
Dhanashri Atre8d978172015-10-30 15:12:03 -07004919 *
4920 * Store the LRO flush callback provided and in turn
4921 * register OL's LRO flush handler with CE
4922 *
4923 * Return: none
4924 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004925static void ol_register_lro_flush_cb(void (lro_flush_cb)(void *),
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004926 void *(lro_init_cb)(void))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004927{
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004928 struct hif_opaque_softc *hif_device;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304929 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004930
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004931 if (pdev == NULL) {
Manjunathappa Prakashef45aba2016-04-29 11:09:15 -07004932 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: pdev NULL!", __func__);
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004933 TXRX_ASSERT2(0);
4934 goto out;
4935 }
4936 if (pdev->lro_info.lro_flush_cb != NULL) {
4937 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
4938 "%s: LRO already initialised\n", __func__);
4939 if (pdev->lro_info.lro_flush_cb != lro_flush_cb) {
4940 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
4941 "lro_flush_cb is differ to previously registered callback\n")
4942 TXRX_ASSERT2(0);
4943 goto out;
4944 }
4945 qdf_atomic_inc(&pdev->lro_info.lro_dev_cnt);
4946 goto out;
4947 }
4948 pdev->lro_info.lro_flush_cb = lro_flush_cb;
4949 hif_device = (struct hif_opaque_softc *)
4950 cds_get_context(QDF_MODULE_ID_HIF);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004951
Mohit Khannabf9e3dd2016-11-30 18:39:07 -08004952 if (qdf_unlikely(hif_device == NULL)) {
4953 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
4954 "%s: hif_device NULL!", __func__);
4955 qdf_assert(0);
4956 goto out;
4957 }
4958
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004959 hif_lro_flush_cb_register(hif_device, ol_txrx_lro_flush, lro_init_cb);
4960 qdf_atomic_inc(&pdev->lro_info.lro_dev_cnt);
4961
4962out:
4963 return;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004964}
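/*
 * Illustrative usage (editor's sketch, not part of the original driver):
 * a hypothetical LRO-capable network layer hooking its flush handler into
 * the receive path and removing it on unload. hdd_lro_flush,
 * hdd_init_lro_mgr and hdd_deinit_lro_mgr are assumed callback names used
 * only for this example.
 *
 *	ol_register_lro_flush_cb(hdd_lro_flush, hdd_init_lro_mgr);
 *	...
 *	ol_deregister_lro_flush_cb(hdd_deinit_lro_mgr);
 */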
Dhanashri Atre8d978172015-10-30 15:12:03 -07004965
4966/**
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004967 * ol_deregister_lro_flush_cb() - deregister the LRO flush callback
4968 * @lro_deinit_cb: callback function for deregistration.
Dhanashri Atre8d978172015-10-30 15:12:03 -07004969 *
4970 * Remove the LRO flush callback provided and in turn
4971 * deregister OL's LRO flush handler with CE
4972 *
4973 * Return: none
4974 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08004975static void ol_deregister_lro_flush_cb(void (lro_deinit_cb)(void *))
Dhanashri Atre8d978172015-10-30 15:12:03 -07004976{
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004977 struct hif_opaque_softc *hif_device;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304978 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Dhanashri Atre8d978172015-10-30 15:12:03 -07004979
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004980 if (pdev == NULL) {
Manjunathappa Prakashef45aba2016-04-29 11:09:15 -07004981 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: pdev NULL!", __func__);
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004982 return;
4983 }
4984 if (qdf_atomic_dec_and_test(&pdev->lro_info.lro_dev_cnt) == 0) {
4985 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
4986 "%s: Other LRO enabled modules still exist, do not unregister the lro_flush_cb\n", __func__);
4987 return;
4988 }
4989 hif_device =
4990 (struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);
4991
Mohit Khannabf9e3dd2016-11-30 18:39:07 -08004992 if (qdf_unlikely(hif_device == NULL)) {
4993 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
4994 "%s: hif_device NULL!", __func__);
4995 qdf_assert(0);
4996 return;
4997 }
4998
Manjunathappa Prakash04f26442016-10-13 14:46:49 -07004999 hif_lro_flush_cb_deregister(hif_device, lro_deinit_cb);
5000
5001 pdev->lro_info.lro_flush_cb = NULL;
Dhanashri Atre8d978172015-10-30 15:12:03 -07005002}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08005003#endif /* FEATURE_LRO */
Dhanashri Atre12a08392016-02-17 13:10:34 -08005004
Poddar, Siddarthbd804202016-11-23 18:19:49 +05305005void
5006ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
5007{
5008 qdf_print("%s: Pkt: VA 0x%p PA 0x%llx len %d\n", __func__,
5009 qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
5010 print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
5011 qdf_nbuf_data(nbuf), len, true);
5012}
5013
Poddar, Siddarth8e3ee2d2016-11-29 20:17:01 +05305014#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5015bool
5016ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
5017{
5018 struct ol_tx_flow_pool_t *pool;
5019 bool enough_desc_flag;
5020
5021 if (!vdev)
5022 return true;
5023
5024 pool = vdev->pool;
5025
5026 qdf_spin_lock_bh(&pool->flow_pool_lock);
5027 enough_desc_flag = (pool->avail_desc < (pool->stop_th +
5028 OL_TX_NON_FWD_RESERVE))
5029 ? false : true;
5030 qdf_spin_unlock_bh(&pool->flow_pool_lock);
5031 return enough_desc_flag;
5032}
5033#else
5034bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
5035{
5036 return true;
5037}
5038#endif
5039
Dhanashri Atre12a08392016-02-17 13:10:34 -08005040/**
5041 * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
5042 * @vdev_id: vdev_id
5043 *
5044 * Return: vdev handle
5045 * NULL if not found.
5046 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005047struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
Dhanashri Atre12a08392016-02-17 13:10:34 -08005048{
5049 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
5050 ol_txrx_vdev_handle vdev = NULL;
5051
5052 if (qdf_unlikely(!pdev))
5053 return NULL;
5054
5055 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5056 if (vdev->vdev_id == vdev_id)
5057 break;
5058 }
5059
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005060 return (struct cdp_vdev *)vdev;
Dhanashri Atre12a08392016-02-17 13:10:34 -08005061}
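/*
 * Illustrative usage (editor's sketch, not part of the original driver):
 * resolving a vdev_id received from the control path back to the txrx vdev
 * before touching per-vdev state.
 *
 *	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)
 *				ol_txrx_get_vdev_from_vdev_id(vdev_id);
 *
 *	if (!vdev)
 *		return QDF_STATUS_E_INVAL;
 */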
Nirav Shah2e583a02016-04-30 14:06:12 +05305062
5063/**
5064 * ol_txrx_set_wisa_mode() - set wisa mode
5065 * @vdev: vdev handle
5066 * @enable: enable flag
5067 *
5068 * Return: QDF STATUS
5069 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005070static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_vdev *pvdev, bool enable)
Nirav Shah2e583a02016-04-30 14:06:12 +05305071{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005072 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Leo Chang98726762016-10-28 11:07:18 -07005073
Nirav Shah2e583a02016-04-30 14:06:12 +05305074 if (!vdev)
5075 return QDF_STATUS_E_INVAL;
5076
5077 vdev->is_wisa_mode_enable = enable;
5078 return QDF_STATUS_SUCCESS;
5079}
Leo Chang98726762016-10-28 11:07:18 -07005080
5081/**
5082 * ol_txrx_get_vdev_id() - get interface id from interface context
5083 * @pvdev: vdev handle
5084 *
5085 * Return: virtual interface id
5086 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005087static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
Leo Chang98726762016-10-28 11:07:18 -07005088{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005089 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
Leo Chang98726762016-10-28 11:07:18 -07005090 return vdev->vdev_id;
5091}
5092
5093/**
5094 * ol_txrx_last_assoc_received() - get time of last assoc received
5095 * @ppeer: peer handle
5096 *
5097 * Return: pointer of the time of last assoc received
5098 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005099static qdf_time_t *ol_txrx_last_assoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005100{
5101 ol_txrx_peer_handle peer = ppeer;
5102
5103 return &peer->last_assoc_rcvd;
5104}
5105
5106/**
5107 * ol_txrx_last_disassoc_received() - get time of last disassoc received
5108 * @ppeer: peer handle
5109 *
5110 * Return: pointer of the time of last disassoc received
5111 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005112static qdf_time_t *ol_txrx_last_disassoc_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005113{
5114 ol_txrx_peer_handle peer = ppeer;
5115
5116 return &peer->last_disassoc_rcvd;
5117}
5118
5119/**
5120 * ol_txrx_last_deauth_received() - get time of last deauth received
5121 * @ppeer: peer handle
5122 *
5123 * Return: pointer of the time of last deauth received
5124 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005125static qdf_time_t *ol_txrx_last_deauth_received(void *ppeer)
Leo Chang98726762016-10-28 11:07:18 -07005126{
5127 ol_txrx_peer_handle peer = ppeer;
5128
5129 return &peer->last_deauth_rcvd;
5130}
5131
5132/**
5133 * ol_txrx_soc_attach_target() - attach soc target
5134 * @soc: soc handle
5135 *
5136 * MCL legacy OL does nothing here
5137 *
5138 * Return: 0
5139 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005140static int ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
Leo Chang98726762016-10-28 11:07:18 -07005141{
5142 /* MCL legacy OL does nothing here */
5143 return 0;
5144}
5145
5146/**
5147 * ol_txrx_soc_detach() - detach soc target
5148 * @soc: soc handle
5149 *
5150 * MCL legacy OL does nothing here
5151 *
5152 * Return: none
5153 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005154static void ol_txrx_soc_detach(void *soc)
Leo Chang98726762016-10-28 11:07:18 -07005155{
5156 /* MCL legacy OL does nothing here */
5157 return;
5158}
5159
5160/**
5161 * ol_txrx_pkt_log_con_service() - connect packet log service
5162 * @ppdev: physical device handle
5163 * @scn: device context
5164 *
5165 * Return: none
5166 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005167static void ol_txrx_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
Leo Chang98726762016-10-28 11:07:18 -07005168{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005169 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
Leo Chang98726762016-10-28 11:07:18 -07005170
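	/* init pktlog for this pdev, then attach the pktlog HTC service */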
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005171 htt_pkt_log_init((struct cdp_pdev *)pdev, scn);
Leo Chang98726762016-10-28 11:07:18 -07005172 pktlog_htc_attach();
5173}
5174
5175/* OL wrapper functions for CDP abstraction */
5176/**
5177 * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
5178 * @peer: peer handle
5179 * @drop: rx packets drop or deliver
5180 *
5181 * Return: none
5182 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005183static void ol_txrx_wrapper_flush_rx_frames(void *peer, bool drop)
Leo Chang98726762016-10-28 11:07:18 -07005184{
5185 ol_txrx_flush_rx_frames((ol_txrx_peer_handle)peer, drop);
5186}
5187
5188/**
5189 * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
5190 * @ppdev: pdev handle
5191 * @vdev_id: interface id
5192 *
5193 * Return: virtual interface instance
5194 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005195static
5196struct cdp_vdev *ol_txrx_wrapper_get_vdev_from_vdev_id(struct cdp_pdev *ppdev,
5197 uint8_t vdev_id)
Leo Chang98726762016-10-28 11:07:18 -07005198{
5199 return ol_txrx_get_vdev_from_vdev_id(vdev_id);
5200}
5201
5202/**
5203 * ol_txrx_wrapper_register_peer() - register peer
5204 * @pdev: pdev handle
5205 * @sta_desc: peer description
5206 *
5207 * Return: QDF STATUS
5208 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005209static QDF_STATUS ol_txrx_wrapper_register_peer(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005210 struct ol_txrx_desc_type *sta_desc)
5211{
5212 return ol_txrx_register_peer(sta_desc);
5213}
5214
5215/**
5216 * ol_txrx_wrapper_peer_find_by_local_id() - Find a txrx peer handle
5217 * @pdev - the data physical device object
5218 * @local_peer_id - the ID txrx assigned locally to the peer in question
5219 *
5220 * The control SW typically uses the txrx peer handle to refer to the peer.
5221 * In unusual circumstances, if it is infeasible for the control SW to maintain
5222 * the txrx peer handle but it can maintain a small integer local peer ID,
5223 * this function allows the peer handle to be retrieved, based on the local
5224 * peer ID.
5225 *
5226 * Return: handle to the txrx peer object
5227 */
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005228static void *
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005229ol_txrx_wrapper_peer_find_by_local_id(struct cdp_pdev *pdev,
5230 uint8_t local_peer_id)
Leo Chang98726762016-10-28 11:07:18 -07005231{
5232 return (void *)ol_txrx_peer_find_by_local_id(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005233 pdev, local_peer_id);
Leo Chang98726762016-10-28 11:07:18 -07005234}
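
/*
 * Illustrative sketch (not part of the driver): how a control-path caller
 * that only tracks the small integer local peer ID might recover the peer
 * handle through this wrapper.  The "pdev" and "sta_id" names below are
 * hypothetical placeholders owned by the caller, not symbols defined here.
 *
 *	void *peer;
 *	uint8_t sta_id = 3;	(assumed local peer ID kept by the control SW)
 *
 *	peer = ol_txrx_wrapper_peer_find_by_local_id(pdev, sta_id);
 *	if (!peer)
 *		return;		(no peer registered under that local ID)
 */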
5235
5236/**
5237 * ol_txrx_wrapper_cfg_is_high_latency() - check if the bus is high or low latency
5238 * @cfg_pdev: cfg context
5239 *
5240 * Return: 1 high latency bus
5241 * 0 low latency bus
5242 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005243static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
Leo Chang98726762016-10-28 11:07:18 -07005244{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005245 return ol_cfg_is_high_latency(cfg_pdev);
Leo Chang98726762016-10-28 11:07:18 -07005246}
5247
5248/**
5249 * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
5250 * @peer_mac - MAC address of the peer which has changed its state
5251 * @state - the new state of the peer
5252 *
5253 * Specify the peer's authentication state (none, connected, authenticated)
5254 * to allow the data SW to determine whether to filter out invalid data frames.
5255 * (In the "connected" state, where security is enabled, but authentication
5256 * has not completed, tx and rx data frames other than EAPOL or WAPI should
5257 * be discarded.)
5258 * This function is only relevant for systems in which the tx and rx filtering
5259 * are done in the host rather than in the target.
5260 *
5261 * Return: QDF Status
5262 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005263static QDF_STATUS ol_txrx_wrapper_peer_state_update(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005264 uint8_t *peer_mac, enum ol_txrx_peer_state state)
5265{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005266 return ol_txrx_peer_state_update(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005267 peer_mac, state);
5268}
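
/*
 * Illustrative sketch (not part of the driver): with host-based filtering,
 * the control SW would typically move a peer to the "conn" state at
 * association and on to "auth" once the EAPOL/WAPI handshake completes.
 * OL_TXRX_PEER_STATE_CONN and OL_TXRX_PEER_STATE_AUTH are assumed from the
 * ol_txrx peer state enum; "pdev" and "peer_mac" are caller-owned
 * placeholders.
 *
 *	ol_txrx_wrapper_peer_state_update(pdev, peer_mac,
 *					  OL_TXRX_PEER_STATE_CONN);
 *	(... key exchange completes ...)
 *	ol_txrx_wrapper_peer_state_update(pdev, peer_mac,
 *					  OL_TXRX_PEER_STATE_AUTH);
 */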
5269
5270/**
5271 * ol_txrx_wrapper_find_peer_by_addr() - find peer instance by address
5272 * @pdev: pdev handle
5273 * @peer_addr: peer address to find
5274 * @peer_id: peer id
5275 *
5276 * Return: peer instance pointer
5277 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005278static void *ol_txrx_wrapper_find_peer_by_addr(struct cdp_pdev *pdev,
Leo Chang98726762016-10-28 11:07:18 -07005279 uint8_t *peer_addr, uint8_t *peer_id)
5280{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005281 return ol_txrx_find_peer_by_addr(pdev,
Leo Chang98726762016-10-28 11:07:18 -07005282 peer_addr, peer_id);
5283}
5284
5285/**
5286 * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
5287 * @cfg_pdev: cfg context
5288 * @cfg_param: cfg parameters
5289 *
5290 * Return: none
5291 */
Jeff Johnsonffa9afc2016-12-19 15:34:41 -08005292static void
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005293ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
5294 void *cfg_param)
Leo Chang98726762016-10-28 11:07:18 -07005295{
5296 return ol_tx_set_flow_control_parameters(
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08005297 cfg_pdev,
Leo Chang98726762016-10-28 11:07:18 -07005298 (struct txrx_pdev_cfg_param_t *)cfg_param);
5299}
5300
5301static struct cdp_cmn_ops ol_ops_cmn = {
5302 .txrx_soc_attach_target = ol_txrx_soc_attach_target,
5303 .txrx_vdev_attach = ol_txrx_vdev_attach,
5304 .txrx_vdev_detach = ol_txrx_vdev_detach,
5305 .txrx_pdev_attach = ol_txrx_pdev_attach,
5306 .txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
5307 .txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
Himanshu Agarwal0b9bbc32017-02-23 16:23:05 +05305308 .txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
Leo Chang98726762016-10-28 11:07:18 -07005309 .txrx_pdev_detach = ol_txrx_pdev_detach,
Dhanashri Atre272fd232016-11-10 16:20:46 -08005310 .txrx_peer_create = ol_txrx_peer_attach,
5311 .txrx_peer_setup = NULL,
5312 .txrx_peer_teardown = NULL,
5313 .txrx_peer_delete = ol_txrx_peer_detach,
Leo Chang98726762016-10-28 11:07:18 -07005314 .txrx_vdev_register = ol_txrx_vdev_register,
5315 .txrx_soc_detach = ol_txrx_soc_detach,
5316 .txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
5317 .txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
5318 .txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
Krishna Kumaar Natarajan5fb9ac12016-12-06 14:28:35 -08005319 .txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
Leo Chang98726762016-10-28 11:07:18 -07005320 .txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
5321 .txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
5322 .txrx_get_tx_pending = ol_txrx_get_tx_pending,
Manikandan Mohan8b4e2012017-03-22 11:15:55 -07005323 .flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
Leo Chang98726762016-10-28 11:07:18 -07005324 .txrx_fw_stats_get = ol_txrx_fw_stats_get
5325 /* TODO: Add other functions */
5326};
5327
5328static struct cdp_misc_ops ol_ops_misc = {
5329 .set_ibss_vdev_heart_beat_timer =
5330 ol_txrx_set_ibss_vdev_heart_beat_timer,
5331#ifdef CONFIG_HL_SUPPORT
5332 .set_wmm_param = ol_txrx_set_wmm_param,
5333#endif /* CONFIG_HL_SUPPORT */
5334 .bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
5335 .bad_peer_txctl_update_threshold =
5336 ol_txrx_bad_peer_txctl_update_threshold,
5337 .hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
5338 .tx_non_std = ol_tx_non_std,
5339 .get_vdev_id = ol_txrx_get_vdev_id,
5340 .set_wisa_mode = ol_txrx_set_wisa_mode,
5341#ifdef FEATURE_RUNTIME_PM
5342 .runtime_suspend = ol_txrx_runtime_suspend,
5343 .runtime_resume = ol_txrx_runtime_resume,
5344#endif /* FEATURE_RUNTIME_PM */
5345 .get_opmode = ol_txrx_get_opmode,
5346 .mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
5347 .update_mac_id = ol_txrx_update_mac_id,
5348 .flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
5349 .get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
5350 .pkt_log_init = htt_pkt_log_init,
5351 .pkt_log_con_service = ol_txrx_pkt_log_con_service
5352};
5353
5354static struct cdp_flowctl_ops ol_ops_flowctl = {
5355#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5356 .register_pause_cb = ol_txrx_register_pause_cb,
5357 .set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
5358 .dump_flow_pool_info = ol_tx_dump_flow_pool_info
5359#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
5360};
5361
5362static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
5363#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
5364 .register_tx_flow_control = ol_txrx_register_tx_flow_control,
5365 .deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
5366 .flow_control_cb = ol_txrx_flow_control_cb,
5367 .get_tx_resource = ol_txrx_get_tx_resource,
5368 .ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
5369 .vdev_flush = ol_txrx_vdev_flush,
5370 .vdev_pause = ol_txrx_vdev_pause,
5371 .vdev_unpause = ol_txrx_vdev_unpause
5372#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
5373};
5374
5375static struct cdp_ipa_ops ol_ops_ipa = {
5376#ifdef IPA_OFFLOAD
5377 .ipa_get_resource = ol_txrx_ipa_uc_get_resource,
5378 .ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
5379 .ipa_set_active = ol_txrx_ipa_uc_set_active,
5380 .ipa_op_response = ol_txrx_ipa_uc_op_response,
5381 .ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
5382 .ipa_get_stat = ol_txrx_ipa_uc_get_stat,
5383 .ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
5384 .ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base
5385#endif /* IPA_OFFLOAD */
5386};
5387
5388static struct cdp_lro_ops ol_ops_lro = {
5389#ifdef FEATURE_LRO
5390 .register_lro_flush_cb = ol_register_lro_flush_cb,
5391 .deregister_lro_flush_cb = ol_deregister_lro_flush_cb
5392#endif /* FEATURE_LRO */
5393};
5394
5395static struct cdp_bus_ops ol_ops_bus = {
5396 .bus_suspend = ol_txrx_bus_suspend,
5397 .bus_resume = ol_txrx_bus_resume
5398};
5399
5400static struct cdp_ocb_ops ol_ops_ocb = {
5401 .set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
5402 .get_ocb_chan_info = ol_txrx_get_ocb_chan_info
5403};
5404
5405static struct cdp_throttle_ops ol_ops_throttle = {
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005406#ifdef QCA_SUPPORT_TX_THROTTLE
Leo Chang98726762016-10-28 11:07:18 -07005407 .throttle_init_period = ol_tx_throttle_init_period,
5408 .throttle_set_level = ol_tx_throttle_set_level
Jeff Johnsonb13a5012016-12-21 08:41:16 -08005409#endif /* QCA_SUPPORT_TX_THROTTLE */
Leo Chang98726762016-10-28 11:07:18 -07005410};
5411
5412static struct cdp_mob_stats_ops ol_ops_mob_stats = {
5413 .display_stats = ol_txrx_display_stats,
5414 .clear_stats = ol_txrx_clear_stats,
5415 .stats = ol_txrx_stats
5416};
5417
5418static struct cdp_cfg_ops ol_ops_cfg = {
5419 .set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
5420 .set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
5421 .cfg_attach = ol_pdev_cfg_attach,
5422 .vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
5423 .is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
5424 .tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
5425 .is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
5426 .set_flow_control_parameters =
5427 ol_txrx_wrapper_set_flow_control_parameters,
5428 .set_flow_steering = ol_set_cfg_flow_steering,
5429};
5430
5431static struct cdp_peer_ops ol_ops_peer = {
5432 .register_peer = ol_txrx_wrapper_register_peer,
5433 .clear_peer = ol_txrx_clear_peer,
5434 .find_peer_by_addr = ol_txrx_wrapper_find_peer_by_addr,
5435 .find_peer_by_addr_and_vdev = ol_txrx_find_peer_by_addr_and_vdev,
5436 .local_peer_id = ol_txrx_local_peer_id,
5437 .peer_find_by_local_id = ol_txrx_wrapper_peer_find_by_local_id,
5438 .peer_state_update = ol_txrx_wrapper_peer_state_update,
5439 .get_vdevid = ol_txrx_get_vdevid,
5440 .get_vdev_by_sta_id = ol_txrx_get_vdev_by_sta_id,
5441 .register_ocb_peer = ol_txrx_register_ocb_peer,
5442 .peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
5443 .get_peer_state = ol_txrx_get_peer_state,
5444 .get_vdev_for_peer = ol_txrx_get_vdev_for_peer,
5445 .update_ibss_add_peer_num_of_vdev =
5446 ol_txrx_update_ibss_add_peer_num_of_vdev,
5447 .remove_peers_for_vdev = ol_txrx_remove_peers_for_vdev,
5448 .remove_peers_for_vdev_no_lock = ol_txrx_remove_peers_for_vdev_no_lock,
Yu Wang053d3e72017-02-08 18:48:24 +08005449#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
Leo Chang98726762016-10-28 11:07:18 -07005450 .copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
5451 .add_last_real_peer = ol_txrx_add_last_real_peer,
Jeff Johnson2338e1a2016-12-16 15:59:24 -08005452 .is_vdev_restore_last_peer = is_vdev_restore_last_peer,
5453 .update_last_real_peer = ol_txrx_update_last_real_peer,
5454#endif /* CONFIG_HL_SUPPORT && FEATURE_WLAN_TDLS */
Leo Chang98726762016-10-28 11:07:18 -07005455 .last_assoc_received = ol_txrx_last_assoc_received,
5456 .last_disassoc_received = ol_txrx_last_disassoc_received,
5457 .last_deauth_received = ol_txrx_last_deauth_received,
Leo Chang98726762016-10-28 11:07:18 -07005458 .peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
5459};
5460
5461static struct cdp_tx_delay_ops ol_ops_delay = {
5462#ifdef QCA_COMPUTE_TX_DELAY
5463 .tx_delay = ol_tx_delay,
5464 .tx_delay_hist = ol_tx_delay_hist,
5465 .tx_packet_count = ol_tx_packet_count,
5466 .tx_set_compute_interval = ol_tx_set_compute_interval
5467#endif /* QCA_COMPUTE_TX_DELAY */
5468};
5469
5470static struct cdp_pmf_ops ol_ops_pmf = {
5471 .get_pn_info = ol_txrx_get_pn_info
5472};
5473
5474/* WIN platform specific structures */
5475static struct cdp_ctrl_ops ol_ops_ctrl = {
5476 /* EMPTY FOR MCL */
5477};
5478
5479static struct cdp_me_ops ol_ops_me = {
5480 /* EMPTY FOR MCL */
5481};
5482
5483static struct cdp_mon_ops ol_ops_mon = {
5484 /* EMPTY FOR MCL */
5485};
5486
5487static struct cdp_host_stats_ops ol_ops_host_stats = {
5488 /* EMPTY FOR MCL */
5489};
5490
5491static struct cdp_wds_ops ol_ops_wds = {
5492 /* EMPTY FOR MCL */
5493};
5494
5495static struct cdp_raw_ops ol_ops_raw = {
5496 /* EMPTY FOR MCL */
5497};
5498
5499static struct cdp_ops ol_txrx_ops = {
5500 .cmn_drv_ops = &ol_ops_cmn,
5501 .ctrl_ops = &ol_ops_ctrl,
5502 .me_ops = &ol_ops_me,
5503 .mon_ops = &ol_ops_mon,
5504 .host_stats_ops = &ol_ops_host_stats,
5505 .wds_ops = &ol_ops_wds,
5506 .raw_ops = &ol_ops_raw,
5507 .misc_ops = &ol_ops_misc,
5508 .cfg_ops = &ol_ops_cfg,
5509 .flowctl_ops = &ol_ops_flowctl,
5510 .l_flowctl_ops = &ol_ops_l_flowctl,
5511 .ipa_ops = &ol_ops_ipa,
5512 .lro_ops = &ol_ops_lro,
5513 .bus_ops = &ol_ops_bus,
5514 .ocb_ops = &ol_ops_ocb,
5515 .peer_ops = &ol_ops_peer,
5516 .throttle_ops = &ol_ops_throttle,
5517 .mob_stats_ops = &ol_ops_mob_stats,
5518 .delay_ops = &ol_ops_delay,
5519 .pmf_ops = &ol_ops_pmf
5520};
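
/*
 * Illustrative sketch (not part of the driver): once ol_txrx_soc_attach()
 * below returns a cdp_soc_t, upper layers reach the tables above through
 * soc->ops instead of calling the ol_* functions directly.  "soc", "pdev"
 * and "vdev_id" are hypothetical placeholders owned by the caller.
 *
 *	struct cdp_vdev *vdev;
 *
 *	vdev = soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id(pdev,
 *								 vdev_id);
 *	if (vdev)
 *		soc->ops->misc_ops->set_wisa_mode(vdev, true);
 */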
5521
Jeff Johnson02c37b42017-01-10 14:49:24 -08005522/*
5523 * Local prototype added to temporarily address warning caused by
5524 * -Wmissing-prototypes. A more correct solution, namely to expose
5525 * a prototype in an appropriate header file, will come later.
5526 */
5527struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
5528 struct ol_if_ops *dp_ol_if_ops);
5529struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
5530 struct ol_if_ops *dp_ol_if_ops)
Leo Chang98726762016-10-28 11:07:18 -07005531{
5532 struct cdp_soc_t *soc = qdf_mem_malloc(sizeof(struct cdp_soc_t));
5533 if (!soc) {
5534 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5535 "%s: OL SOC memory allocation failed\n", __func__);
5536 return NULL;
5537 }
5538
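	/* expose the legacy OL datapath through the common CDP ops table */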
5539 soc->ops = &ol_txrx_ops;
5540 return soc;
5541}
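
/*
 * Illustrative usage sketch (not part of the driver): a hypothetical caller
 * bringing the datapath soc up and down through the attach/detach pair.
 * "scn" is a placeholder device context; dp_ol_if_ops is unused by this
 * legacy OL implementation, so passing NULL is assumed to be safe here.
 *
 *	struct cdp_soc_t *soc = ol_txrx_soc_attach(scn, NULL);
 *
 *	if (!soc)
 *		return QDF_STATUS_E_NOMEM;
 *	(... datapath runs ...)
 *	soc->ops->cmn_drv_ops->txrx_soc_detach(soc);
 */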
5542
5543