/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>              /* uint32_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc,free */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <qdf_atomic.h>         /* qdf_atomic_read */

#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
/* Required for WLAN_FEATURE_FASTPATH */
#include <ce_api.h>
#endif
/* header files for utilities */
#include <cds_queue.h>          /* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_if_athvar.h>

/* header files for HTT API */
#include <ol_htt_api.h>
#include <ol_htt_tx_api.h>

/* header files for our own APIs */
#include <ol_txrx_api.h>
#include <ol_txrx_dbg.h>
#include <cdp_txrx_ocb.h>
#include <ol_txrx_ctrl_api.h>
#include <cdp_txrx_stats.h>
#include <ol_txrx_osif_api.h>
/* header files for our internal definitions */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT, etc. */
#include <wdi_event.h>          /* WDI events */
#include <ol_tx.h>              /* ol_tx_ll */
#include <ol_rx.h>              /* ol_rx_deliver */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
#include <ol_rx_pn.h>           /* ol_rx_pn_check, etc. */
#include <ol_rx_fwd.h>          /* ol_rx_fwd_check, etc. */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
#include <ol_rx_reorder.h>
#include <ol_tx_send.h>         /* ol_tx_discard_target_frms */
#include <ol_tx_desc.h>         /* ol_tx_desc_frame_free */
#include <ol_tx_queue.h>
#include <ol_tx_sched.h>        /* ol_tx_sched_attach, etc. */
#include <ol_txrx.h>
#include <cdp_txrx_flow_ctrl_legacy.h>
#include <cdp_txrx_ipa.h>
#include "wma.h"
#include "hif.h"
#include <cdp_txrx_peer_ops.h>
#ifndef REMOVE_PKT_LOG
#include "pktlog_ac.h"
#endif
#include <cds_concurrency.h>
#include "epping_main.h"
#include <a_types.h>

#ifdef CONFIG_HL_SUPPORT

/**
 * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
 * @vdev: the data virtual device
 * @bss_addr: bss address
 *
 * Return: None
 */
void
ol_txrx_copy_mac_addr_raw(ol_txrx_vdev_handle vdev, uint8_t *bss_addr)
{
	if (bss_addr && vdev->last_real_peer &&
	    !qdf_mem_cmp((u8 *)bss_addr,
			 vdev->last_real_peer->mac_addr.raw,
			 IEEE80211_ADDR_LEN))
		qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
			     vdev->last_real_peer->mac_addr.raw,
			     OL_TXRX_MAC_ADDR_LEN);
}

/**
 * ol_txrx_add_last_real_peer() - add last peer
 * @pdev: the data physical device
 * @vdev: virtual device
 * @peer_id: peer id
 *
 * Return: None
 */
void
ol_txrx_add_last_real_peer(ol_txrx_pdev_handle pdev,
			   ol_txrx_vdev_handle vdev,
			   uint8_t *peer_id)
{
	ol_txrx_peer_handle peer;
	if (vdev->last_real_peer == NULL) {
		peer = NULL;
		peer = ol_txrx_find_peer_by_addr(pdev,
				vdev->hl_tdls_ap_mac_addr.raw,
				peer_id);
		if (peer && (peer->peer_ids[0] !=
				HTT_INVALID_PEER_ID))
			vdev->last_real_peer = peer;
	}
}

/**
 * is_vdev_restore_last_peer() - check for vdev last peer
 * @peer: peer object
 *
 * Return: true if the peer is the vdev's last real peer
 */
bool
is_vdev_restore_last_peer(struct ol_txrx_peer_t *peer)
{
	struct ol_txrx_vdev_t *vdev;
	vdev = peer->vdev;
	return vdev->last_real_peer && (vdev->last_real_peer == peer);
}

/**
 * ol_txrx_update_last_real_peer() - restore the last real peer of the vdev
 * @pdev: the data physical device
 * @peer: peer device
 * @peer_id: peer id
 * @restore_last_peer: restore last peer flag
 *
 * Return: None
 */
void
ol_txrx_update_last_real_peer(
	ol_txrx_pdev_handle pdev,
	struct ol_txrx_peer_t *peer,
	uint8_t *peer_id, bool restore_last_peer)
{
	struct ol_txrx_vdev_t *vdev;
	vdev = peer->vdev;
	if (restore_last_peer && (vdev->last_real_peer == NULL)) {
		peer = NULL;
		peer = ol_txrx_find_peer_by_addr(pdev,
				vdev->hl_tdls_ap_mac_addr.raw, peer_id);
		if (peer && (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
			vdev->last_real_peer = peer;
	}
}
#endif

/**
 * ol_tx_mark_first_wakeup_packet() - set flag to indicate that the fw
 *    supports marking the first packet after wow wakeup
 * @value: 1 for enabled, 0 for disabled
 *
 * Return: None
 */
void ol_tx_mark_first_wakeup_packet(uint8_t value)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL\n", __func__);
		return;
	}

	htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
}

u_int16_t
ol_tx_desc_pool_size_hl(ol_pdev_handle ctrl_pdev)
{
	u_int16_t desc_pool_size;
	u_int16_t steady_state_tx_lifetime_ms;
	u_int16_t safety_factor;

	/*
	 * Steady-state tx latency:
	 *     roughly 1-2 ms flight time
	 *   + roughly 1-2 ms prep time,
	 *   + roughly 1-2 ms target->host notification time.
	 * = roughly 6 ms total
	 * Thus, steady state number of frames =
	 * steady state max throughput / frame size * tx latency, e.g.
	 * 1 Gbps / 1500 bytes * 6 ms = 500
	 *
	 */
	steady_state_tx_lifetime_ms = 6;

	safety_factor = 8;

	desc_pool_size =
		ol_cfg_max_thruput_mbps(ctrl_pdev) *
		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
		(8 * OL_TX_AVG_FRM_BYTES) *
		steady_state_tx_lifetime_ms *
		safety_factor;
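	/*
	 * Worked example (illustrative numbers only): if the configured max
	 * throughput is 1000 Mbps and OL_TX_AVG_FRM_BYTES is 1500, the
	 * integer arithmetic above gives 1000 * 1000 / 12000 = 83 frames/ms,
	 * then * 6 ms * 8 = 3984 descriptors, which is clamped to the
	 * [MIN, MAX] bounds below.
	 */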

	/* minimum */
	if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;

	/* maximum */
	if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;

	return desc_pool_size;
}

/*=== function definitions ===*/

/**
 * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate that mgmt over
 * wmi is enabled or not.
 * @value: 1 for enabled, 0 for disabled
 *
 * Return: None
 */
void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->is_mgmt_over_wmi_enabled = value;
	return;
}

/**
 * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
 *
 * Return: is_mgmt_over_wmi_enabled
 */
uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return 0;
	}
	return pdev->is_mgmt_over_wmi_enabled;
}


#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
ol_txrx_peer_handle
ol_txrx_find_peer_by_addr_and_vdev(ol_txrx_pdev_handle pdev,
				   ol_txrx_vdev_handle vdev,
				   uint8_t *peer_addr, uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	qdf_atomic_dec(&peer->ref_cnt);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: peer %p peer->ref_cnt %d", __func__, peer,
		  qdf_atomic_read(&peer->ref_cnt));
	return peer;
}

QDF_STATUS ol_txrx_get_vdevid(struct ol_txrx_peer_t *peer, uint8_t *vdev_id)
{
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "peer argument is null!!");
		return QDF_STATUS_E_FAILURE;
	}

	*vdev_id = peer->vdev->vdev_id;
	return QDF_STATUS_SUCCESS;
}

void *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id)
{
	struct ol_txrx_peer_t *peer = NULL;
	ol_txrx_pdev_handle pdev = NULL;

	if (sta_id >= WLAN_MAX_STA_COUNT) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid sta id passed");
		return NULL;
	}

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "PDEV not found for sta_id [%d]", sta_id);
		return NULL;
	}

	peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "PEER [%d] not found", sta_id);
		return NULL;
	}

	return peer->vdev;
}

ol_txrx_peer_handle ol_txrx_find_peer_by_addr(ol_txrx_pdev_handle pdev,
					      uint8_t *peer_addr,
					      uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_hash_find(pdev, peer_addr, 0, 1);
	if (!peer)
		return NULL;
	*peer_id = peer->local_id;
	qdf_atomic_dec(&peer->ref_cnt);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: peer %p peer->ref_cnt %d", __func__, peer,
		  qdf_atomic_read(&peer->ref_cnt));
	return peer;
}

uint16_t ol_txrx_local_peer_id(ol_txrx_peer_handle peer)
{
	return peer->local_id;
}
348
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -0700349/**
350 * @brief Find a txrx peer handle from a peer's local ID
351 * @details
352 * The control SW typically uses the txrx peer handle to refer to the peer.
353 * In unusual circumstances, if it is infeasible for the control SW maintain
354 * the txrx peer handle but it can maintain a small integer local peer ID,
355 * this function allows the peer handled to be retrieved, based on the local
356 * peer ID.
357 *
358 * @param pdev - the data physical device object
359 * @param local_peer_id - the ID txrx assigned locally to the peer in question
360 * @return handle to the txrx peer object
361 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800362ol_txrx_peer_handle
363ol_txrx_peer_find_by_local_id(struct ol_txrx_pdev_t *pdev,
364 uint8_t local_peer_id)
365{
366 struct ol_txrx_peer_t *peer;
367 if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
368 (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
369 return NULL;
370 }
371
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530372 qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800373 peer = pdev->local_peer_ids.map[local_peer_id];
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530374 qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800375 return peer;
376}
377
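/*
 * Local peer ID freelist layout, as used by the functions below:
 * pool[i] holds the index of the next free ID (a sentinel entry points to
 * itself to mark the end of the list), freelist holds the index of the
 * current head, and map[i] holds the peer currently using ID i (NULL while
 * the ID is free).
 */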
static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	int i = peer->local_id;
	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}
	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
}

#else
#define ol_txrx_local_peer_id_pool_init(pdev)   /* no-op */
#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
#define ol_txrx_local_peer_id_free(pdev, peer)  /* no-op */
#define ol_txrx_local_peer_id_cleanup(pdev)     /* no-op */
#endif

#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

/**
 * ol_txrx_update_group_credit() - update group credit for tx queue
 * @group: for which credit needs to be updated
 * @credit: credits
 * @absolute: TXQ group absolute
 *
 * Return: None
 */
void ol_txrx_update_group_credit(
		struct ol_tx_queue_group_t *group,
		int32_t credit,
		u_int8_t absolute)
{
	if (absolute)
		qdf_atomic_set(&group->credit, credit);
	else
		qdf_atomic_add(credit, &group->credit);
}

/**
 * ol_txrx_update_tx_queue_groups() - update vdev tx queue group if
 *				      vdev id mask and ac mask do not match
 * @pdev: the data physical device
 * @group_id: TXQ group id
 * @credit: TXQ group credit count
 * @absolute: TXQ group absolute
 * @vdev_id_mask: TXQ vdev group id mask
 * @ac_mask: TXQ access category mask
 *
 * Return: None
 */
void ol_txrx_update_tx_queue_groups(
		ol_txrx_pdev_handle pdev,
		u_int8_t group_id,
		int32_t credit,
		u_int8_t absolute,
		u_int32_t vdev_id_mask,
		u_int32_t ac_mask
		)
{
	struct ol_tx_queue_group_t *group;
	u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
	u_int32_t membership;
	struct ol_txrx_vdev_t *vdev;
	group = &pdev->txq_grps[group_id];

	membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	/*
	 * if the membership (vdev id mask and ac mask)
	 * matches then no need to update tx queue groups.
	 */
	if (group->membership == membership)
		/* Update Credit Only */
		goto credit_update;


	/*
	 * membership (vdev id mask and ac mask) is not matching
	 * TODO: ignoring ac mask for now
	 */
	group_vdev_id_mask =
		OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		group_vdev_bit_mask =
			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
					group_vdev_id_mask, vdev->vdev_id);
		vdev_bit_mask =
			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
					vdev_id_mask, vdev->vdev_id);

		if (group_vdev_bit_mask != vdev_bit_mask) {
			/*
			 * Change in vdev tx queue group
			 */
			if (!vdev_bit_mask) {
				/* Set Group Pointer (vdev and peer) to NULL */
				ol_tx_set_vdev_group_ptr(
						pdev, vdev->vdev_id, NULL);
			} else {
				/* Set Group Pointer (vdev and peer) */
				ol_tx_set_vdev_group_ptr(
						pdev, vdev->vdev_id, group);
			}
		}
	}
	/* Update membership */
	group->membership = membership;
credit_update:
	/* Update Credit */
	ol_txrx_update_group_credit(group, credit, absolute);
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
}
#endif

#ifdef WLAN_FEATURE_FASTPATH
/**
 * setup_fastpath_ce_handles() - update pdev with ce_handle for fastpath use.
 *
 * @osc: pointer to HIF context
 * @pdev: pointer to ol pdev
 *
 * Return: void
 */
static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
						struct ol_txrx_pdev_t *pdev)
{
	/*
	 * Before the HTT attach, set up the CE handles
	 * CE handles are (struct CE_state *)
	 * This is only required in the fast path
	 */
	pdev->ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_H2T_MSG);

}

#else  /* not WLAN_FEATURE_FASTPATH */
static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
						struct ol_txrx_pdev_t *pdev)
{
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_set_desc_global_pool_size() - set global pool size
 * @num_msdu_desc: total number of descriptors
 *
 * Return: none
 */
void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (!pdev) {
		qdf_print("%s: pdev is NULL\n", __func__);
		return;
	}
	pdev->num_msdu_desc = num_msdu_desc;
	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
		pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Global pool size: %d\n",
		pdev->num_msdu_desc);
	return;
}

/**
 * ol_tx_get_desc_global_pool_size() - get global pool size
 * @pdev: pdev handle
 *
 * Return: global pool size
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return pdev->num_msdu_desc;
}

/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
static inline
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
	struct ol_tx_flow_pool_t *pool = NULL;
	uint32_t free_desc;

	free_desc = pdev->tx_desc.num_free;
	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
			 flow_pool_list_elem) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		free_desc += pool->avail_desc;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	}
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return free_desc;
}

#else
/**
 * ol_tx_get_desc_global_pool_size() - get global pool size
 * @pdev: pdev handle
 *
 * Return: global pool size
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
}

/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
static inline
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
	return pdev->tx_desc.num_free;
}

#endif

#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)

/**
 * ol_txrx_rsrc_threshold_lo() - set threshold low - when to start tx desc
 *				 margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold low
 */
static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;
	/*
	 * 5% margin of unallocated desc is too much for per
	 * vdev mechanism.
	 * Define the value separately.
	 */
	threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;

	return threshold_low;
}

/**
 * ol_txrx_rsrc_threshold_hi() - set threshold high - where to stop
 *				 during tx desc margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold high
 */
static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/* when freeing up descriptors,
	 * keep going until there's a 7.5% margin
	 */
	threshold_high = ((15 * desc_pool_size)/100)/2;

	return threshold_high;
}
#else

static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;
	/* always maintain a 5% margin of unallocated descriptors */
	threshold_low = (5 * desc_pool_size)/100;

	return threshold_low;
}

static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/* when freeing up descriptors, keep going until
	 * there's a 15% margin
	 */
	threshold_high = (15 * desc_pool_size)/100;

	return threshold_high;
}
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
 * @pdev: the physical device object
 *
 * Return: None
 */
static void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->txq_log_spinlock);
	pdev->txq_log.size = OL_TXQ_LOG_SIZE;
	pdev->txq_log.oldest_record_offset = 0;
	pdev->txq_log.offset = 0;
	pdev->txq_log.allow_wrap = 1;
	pdev->txq_log.wrapped = 0;
}

/**
 * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->txq_log_spinlock);
}

#else

static inline void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
	return;
}

static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
	return;
}


#endif

#if defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->grp_stat_spinlock);
	pdev->grp_stats.last_valid_index = -1;
	pdev->grp_stats.wrap_around = 0;
}

/**
 * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
}
#else

static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
	return;
}

static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
	return;
}
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
 * @vdev: the virtual device object
 * @flag: flag
 *
 * Return: None
 */
void
ol_txrx_hl_tdls_flag_reset(struct ol_txrx_vdev_t *vdev, bool flag)
{
	vdev->hlTdlsFlag = flag;
}
#endif

#if defined(CONFIG_HL_SUPPORT)

/**
 * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
	u_int8_t i;
	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		TAILQ_INIT(&vdev->txqs[i].head);
		vdev->txqs[i].paused_count.total = 0;
		vdev->txqs[i].frms = 0;
		vdev->txqs[i].bytes = 0;
		vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
		vdev->txqs[i].flag = ol_tx_queue_empty;
		/* aggregation is not applicable for vdev tx queues */
		vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
		ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
		ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
	}
}

/**
 * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_frms_queue_t *txq;
	int i;

	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		txq = &vdev->txqs[i];
		ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
	}
}

/**
 * ol_txrx_peer_txqs_init() - initialise peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
static void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
		       struct ol_txrx_peer_t *peer)
{
	uint8_t i;
	struct ol_txrx_vdev_t *vdev = peer->vdev;
	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		TAILQ_INIT(&peer->txqs[i].head);
		peer->txqs[i].paused_count.total = 0;
		peer->txqs[i].frms = 0;
		peer->txqs[i].bytes = 0;
		peer->txqs[i].ext_tid = i;
		peer->txqs[i].flag = ol_tx_queue_empty;
		peer->txqs[i].aggr_state = ol_tx_aggr_untried;
		ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
		ol_txrx_set_txq_peer(&peer->txqs[i], peer);
	}
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);

	/* aggregation is not applicable for mgmt and non-QoS tx queues */
	for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
		peer->txqs[i].aggr_state = ol_tx_aggr_disabled;

	ol_txrx_peer_pause(peer);
}

/**
 * ol_txrx_peer_tx_queue_free() - free peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
static void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	struct ol_tx_frms_queue_t *txq;
	uint8_t i;

	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		txq = &peer->txqs[i];
		ol_tx_queue_free(pdev, txq, i, true);
	}
}
#else

static inline void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
	return;
}

static inline void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
	return;
}

static inline void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
		       struct ol_txrx_peer_t *peer)
{
	return;
}

static inline void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	return;
}
#endif

/**
 * ol_txrx_pdev_attach() - allocate txrx pdev
 * @ctrl_pdev: cfg pdev
 * @htc_pdev: HTC pdev
 * @osdev: os dev
 *
 * Return: txrx pdev handle
 *         NULL for failure
 */
ol_txrx_pdev_handle
ol_txrx_pdev_attach(ol_pdev_handle ctrl_pdev,
		    HTC_HANDLE htc_pdev, qdf_device_t osdev)
{
	struct ol_txrx_pdev_t *pdev;
	int i;

	pdev = qdf_mem_malloc(sizeof(*pdev));
	if (!pdev)
		goto fail0;
	qdf_mem_zero(pdev, sizeof(*pdev));

	/* init LL/HL cfg here */
	pdev->cfg.is_high_latency = ol_cfg_is_high_latency(ctrl_pdev);
	pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(ctrl_pdev);

	/* store provided params */
	pdev->ctrl_pdev = ctrl_pdev;
	pdev->osdev = osdev;

	for (i = 0; i < htt_num_sec_types; i++)
		pdev->sec_types[i] = (enum ol_sec_type)i;

	TXRX_STATS_INIT(pdev);

	TAILQ_INIT(&pdev->vdev_list);

	/* do initial set up of the peer ID -> peer object lookup map */
	if (ol_txrx_peer_find_attach(pdev))
		goto fail1;

	/* initialize the counter of the target's tx buffer availability */
	qdf_atomic_init(&pdev->target_tx_credit);
	qdf_atomic_init(&pdev->orig_target_tx_credit);

	if (ol_cfg_is_high_latency(ctrl_pdev)) {
		qdf_spinlock_create(&pdev->tx_queue_spinlock);
		pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
		if (pdev->tx_sched.scheduler == NULL)
			goto fail2;
	}
	ol_txrx_pdev_txq_log_init(pdev);
	ol_txrx_pdev_grp_stats_init(pdev);

	pdev->htt_pdev =
		htt_pdev_alloc(pdev, ctrl_pdev, htc_pdev, osdev);
	if (!pdev->htt_pdev)
		goto fail3;

	return pdev;

fail3:
	ol_txrx_peer_find_detach(pdev);

fail2:
	if (ol_cfg_is_high_latency(ctrl_pdev))
		qdf_spinlock_destroy(&pdev->tx_queue_spinlock);

fail1:
	qdf_mem_free(pdev);

fail0:
	return NULL;
}

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
/**
 * htt_pkt_log_init() - API to initialize packet log
 * @handle: pdev handle
 * @scn: HIF context
 *
 * Return: void
 */
void htt_pkt_log_init(struct ol_txrx_pdev_t *handle, void *scn)
{
	if (handle->pkt_log_init)
		return;

	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
	    !QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
		ol_pl_sethandle(&handle->pl_dev, scn);
		if (pktlogmod_init(scn))
			qdf_print("%s: pktlogmod_init failed", __func__);
		else
			handle->pkt_log_init = true;
	}
}

/**
 * htt_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 * @scn: HIF Context
 *
 * Return: void
 */
void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle, void *scn)
{
	if (scn && cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
	    !QDF_IS_EPPING_ENABLED(cds_get_conparam()) &&
	    handle->pkt_log_init) {
		pktlogmod_exit(scn);
		handle->pkt_log_init = false;
	}
}
#else
void htt_pkt_log_init(ol_txrx_pdev_handle handle, void *ol_sc) { }
void htt_pktlogmod_exit(ol_txrx_pdev_handle handle, void *sc) { }
#endif

/**
 * ol_txrx_pdev_post_attach() - attach txrx pdev
 * @pdev: txrx pdev
 *
 * Return: 0 for success
 */
int
ol_txrx_pdev_post_attach(ol_txrx_pdev_handle pdev)
{
	uint16_t i;
	uint16_t fail_idx = 0;
	int ret = 0;
	uint16_t desc_pool_size;
	struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);

	uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
	union ol_tx_desc_list_elem_t *c_element;
	unsigned int sig_bit;
	uint16_t desc_per_page;

	if (!osc) {
		ret = -EINVAL;
		goto ol_attach_fail;
	}

	/*
	 * For LL, limit the number of host's tx descriptors to match
	 * the number of target FW tx descriptors.
	 * This simplifies the FW, by ensuring the host will never
	 * download more tx descriptors than the target has space for.
	 * The FW will drop/free low-priority tx descriptors when it
	 * starts to run low, so that in theory the host should never
	 * run out of tx descriptors.
	 */

	/*
	 * LL - initialize the target credit ourselves.
	 * HL - wait for a HTT target credit initialization
	 *      during htt_attach.
	 */
	if (pdev->cfg.is_high_latency) {
		desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);

		qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
		qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);

		pdev->tx_queue.rsrc_threshold_lo =
			ol_txrx_rsrc_threshold_lo(desc_pool_size);
		pdev->tx_queue.rsrc_threshold_hi =
			ol_txrx_rsrc_threshold_hi(desc_pool_size);

		for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++)
			qdf_atomic_init(&pdev->txq_grps[i].credit);

		ol_tx_target_credit_init(pdev, desc_pool_size);
	} else {
		qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
			       &pdev->target_tx_credit);
		desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
	}

	ol_tx_desc_dup_detect_init(pdev, desc_pool_size);

	setup_fastpath_ce_handles(osc, pdev);

	ret = htt_attach(pdev->htt_pdev, desc_pool_size);
	if (ret)
		goto ol_attach_fail;

	/* Attach micro controller data path offload resource */
	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
		if (htt_ipa_uc_attach(pdev->htt_pdev))
			goto uc_attach_fail;

	/* Calculate single element reserved size power of 2 */
	pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
		pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
	if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
		(NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Page alloc fail");
		goto page_alloc_fail;
	}
	desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
	pdev->tx_desc.offset_filter = desc_per_page - 1;
	/* Calculate page divider to find page number */
	sig_bit = 0;
	while (desc_per_page) {
		sig_bit++;
		desc_per_page = desc_per_page >> 1;
	}
	pdev->tx_desc.page_divider = (sig_bit - 1);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
		pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
		desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
		pdev->tx_desc.desc_pages.num_element_per_page);

	/*
	 * Each SW tx desc (used only within the tx datapath SW) has a
	 * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
	 * Go ahead and allocate the HTT tx desc and link it with the SW tx
	 * desc now, to avoid doing it during time-critical transmit.
	 */
	pdev->tx_desc.pool_size = desc_pool_size;
	pdev->tx_desc.freelist =
		(union ol_tx_desc_list_elem_t *)
		(*pdev->tx_desc.desc_pages.cacheable_pages);
	c_element = pdev->tx_desc.freelist;
	for (i = 0; i < desc_pool_size; i++) {
		void *htt_tx_desc;
		void *htt_frag_desc = NULL;
		qdf_dma_addr_t frag_paddr = 0;
		qdf_dma_addr_t paddr;

		if (i == (desc_pool_size - 1))
			c_element->next = NULL;
		else
			c_element->next = (union ol_tx_desc_list_elem_t *)
				ol_tx_desc_find(pdev, i + 1);

		htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
		if (!htt_tx_desc) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
				  "%s: failed to alloc HTT tx desc (%d of %d)",
				__func__, i, desc_pool_size);
			fail_idx = i;
			goto desc_alloc_fail;
		}

		c_element->tx_desc.htt_tx_desc = htt_tx_desc;
		c_element->tx_desc.htt_tx_desc_paddr = paddr;
		ret = htt_tx_frag_alloc(pdev->htt_pdev,
			i, &frag_paddr, &htt_frag_desc);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: failed to alloc HTT frag dsc (%d/%d)",
				__func__, i, desc_pool_size);
			/* Is there a leak here, is this handling correct? */
			fail_idx = i;
			goto desc_alloc_fail;
		}
		if (!ret && htt_frag_desc) {
			/* Initialize the first 6 words (TSO flags)
			   of the frag descriptor */
			memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
			c_element->tx_desc.htt_frag_desc = htt_frag_desc;
			c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
		}
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
			"%s:%d - %d FRAG VA 0x%p FRAG PA 0x%llx",
			__func__, __LINE__, i,
			c_element->tx_desc.htt_frag_desc,
			(long long unsigned int)
			c_element->tx_desc.htt_frag_desc_paddr);
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
		c_element->tx_desc.pkt_type = 0xff;
#ifdef QCA_COMPUTE_TX_DELAY
		c_element->tx_desc.entry_timestamp_ticks =
			0xffffffff;
#endif
#endif
		c_element->tx_desc.id = i;
		qdf_atomic_init(&c_element->tx_desc.ref_cnt);
		c_element = c_element->next;
		fail_idx = i;
	}

	/* link SW tx descs into a freelist */
	pdev->tx_desc.num_free = desc_pool_size;
	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
		   "%s first tx_desc:0x%p Last tx desc:0x%p\n", __func__,
		   (uint32_t *) pdev->tx_desc.freelist,
		   (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));

	/* check what format of frames are expected to be delivered by the OS */
	pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
	if (pdev->frame_format == wlan_frm_fmt_native_wifi)
		pdev->htt_pkt_type = htt_pkt_type_native_wifi;
	else if (pdev->frame_format == wlan_frm_fmt_802_3) {
		if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
			pdev->htt_pkt_type = htt_pkt_type_eth2;
		else
			pdev->htt_pkt_type = htt_pkt_type_ethernet;
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s Invalid standard frame type: %d",
			  __func__, pdev->frame_format);
		goto control_init_fail;
	}

	/* setup the global rx defrag waitlist */
	TAILQ_INIT(&pdev->rx.defrag.waitlist);

	/* configure where defrag timeout and duplicate detection is handled */
	pdev->rx.flags.defrag_timeout_check =
		pdev->rx.flags.dup_check =
		ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* Need to revisit this part. Currently, hardcoded to riva's caps */
	pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
	pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
	/*
	 * The Riva HW de-aggregate doesn't have capability to generate 802.11
	 * header for non-first subframe of A-MSDU.
	 */
	pdev->sw_subfrm_hdr_recovery_enable = 1;
	/*
	 * The Riva HW doesn't have the capability to set Protected Frame bit
	 * in the MAC header for encrypted data frame.
	 */
	pdev->sw_pf_proc_enable = 1;

	if (pdev->frame_format == wlan_frm_fmt_802_3) {
		/* sw llc process is only needed in
		   802.3 to 802.11 transform case */
		pdev->sw_tx_llc_proc_enable = 1;
		pdev->sw_rx_llc_proc_enable = 1;
	} else {
		pdev->sw_tx_llc_proc_enable = 0;
		pdev->sw_rx_llc_proc_enable = 0;
	}

	switch (pdev->frame_format) {
	case wlan_frm_fmt_raw:
		pdev->sw_tx_encap =
			pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
			? 0 : 1;
		pdev->sw_rx_decap =
			pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
			? 0 : 1;
		break;
	case wlan_frm_fmt_native_wifi:
		pdev->sw_tx_encap =
			pdev->
			target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
			? 0 : 1;
		pdev->sw_rx_decap =
			pdev->
			target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
			? 0 : 1;
		break;
	case wlan_frm_fmt_802_3:
		pdev->sw_tx_encap =
			pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
			? 0 : 1;
		pdev->sw_rx_decap =
			pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
			? 0 : 1;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
			pdev->frame_format,
			pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
		goto control_init_fail;
	}
#endif

	/*
	 * Determine what rx processing steps are done within the host.
	 * Possibilities:
	 * 1.  Nothing - rx->tx forwarding and rx PN entirely within target.
	 *     (This is unlikely; even if the target is doing rx->tx forwarding,
	 *     the host should be doing rx->tx forwarding too, as a back up for
	 *     the target's rx->tx forwarding, in case the target runs short on
	 *     memory, and can't store rx->tx frames that are waiting for
	 *     missing prior rx frames to arrive.)
	 * 2.  Just rx -> tx forwarding.
	 *     This is the typical configuration for HL, and a likely
	 *     configuration for LL STA or small APs (e.g. retail APs).
	 * 3.  Both PN check and rx -> tx forwarding.
	 *     This is the typical configuration for large LL APs.
	 *     Host-side PN check without rx->tx forwarding is not a valid
	 *     configuration, since the PN check needs to be done prior to
	 *     the rx->tx forwarding.
	 */
	if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
		/* PN check, rx-tx forwarding and rx reorder is done by
		   the target */
		if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
			pdev->rx_opt_proc = ol_rx_in_order_deliver;
		else
			pdev->rx_opt_proc = ol_rx_fwd_check;
	} else {
		if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
			if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
				/*
				 * PN check done on host,
				 * rx->tx forwarding not done at all.
				 */
				pdev->rx_opt_proc = ol_rx_pn_check_only;
			} else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
				/*
				 * Both PN check and rx->tx forwarding done
				 * on host.
				 */
				pdev->rx_opt_proc = ol_rx_pn_check;
			} else {
#define TRACESTR01 "invalid config: if rx PN check is on the host,"\
"rx->tx forwarding check needs to also be on the host"
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: %s", __func__, TRACESTR01);
#undef TRACESTR01
				goto control_init_fail;
			}
		} else {
			/* PN check done on target */
			if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
			    ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
				/*
				 * rx->tx forwarding done on host (possibly as
				 * back-up for target-side primary rx->tx
				 * forwarding)
				 */
				pdev->rx_opt_proc = ol_rx_fwd_check;
			} else {
				/* rx->tx forwarding either done in target,
				 * or not done at all */
				pdev->rx_opt_proc = ol_rx_deliver;
			}
		}
	}

	/* initialize mutexes for tx desc alloc and peer lookup */
	qdf_spinlock_create(&pdev->tx_mutex);
	qdf_spinlock_create(&pdev->peer_ref_mutex);
	qdf_spinlock_create(&pdev->rx.mutex);
	qdf_spinlock_create(&pdev->last_real_peer_mutex);
	qdf_spinlock_create(&pdev->peer_map_unmap_lock);
	OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);

	if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK)
		goto reorder_trace_attach_fail;

	if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK)
		goto pn_trace_attach_fail;

#ifdef PERE_IP_HDR_ALIGNMENT_WAR
	pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
#endif

	/*
	 * WDI event attach
	 */
	wdi_event_attach(pdev);

	/*
	 * Initialize rx PN check characteristics for different security types.
	 */
	qdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0);

	/* TKIP: 48-bit TSC, CCMP: 48-bit PN */
	pdev->rx_pn[htt_sec_type_tkip].len =
		pdev->rx_pn[htt_sec_type_tkip_nomic].len =
		pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
	pdev->rx_pn[htt_sec_type_tkip].cmp =
		pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
		pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;

	/* WAPI: 128-bit PN */
	pdev->rx_pn[htt_sec_type_wapi].len = 128;
	pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;

	OL_RX_REORDER_TIMEOUT_INIT(pdev);

	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1, "Created pdev %p\n", pdev);

	pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);

#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3

/* #if 1 -- TODO: clean this up */
#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
	/* avg = 100% * new + 0% * old */ \
	(1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
/*
#else
#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
	//avg = 25% * new + 25% * old
	(1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
#endif
*/
	pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
	pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
#endif

	ol_txrx_local_peer_id_pool_init(pdev);

	pdev->cfg.ll_pause_txq_limit =
		ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);

	/* TX flow control for peer who is in very bad link status */
	ol_tx_badpeer_flow_cl_init(pdev);

#ifdef QCA_COMPUTE_TX_DELAY
	qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
	qdf_spinlock_create(&pdev->tx_delay.mutex);

	/* initialize compute interval with 5 seconds (ESE default) */
	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
	{
		uint32_t bin_width_1000ticks;
		bin_width_1000ticks =
			qdf_system_msecs_to_ticks
				(QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
				 * 1000);
		/*
		 * Compute a factor and shift that together are equal to the
		 * inverse of the bin_width time, so that rather than dividing
		 * by the bin width time, approximately the same result can be
		 * obtained much more efficiently by a multiply + shift.
		 * multiply_factor >> shift = 1 / bin_width_time, so
		 * multiply_factor = (1 << shift) / bin_width_time.
		 *
		 * Pick the shift semi-arbitrarily.
		 * If we knew statically what the bin_width would be, we could
		 * choose a shift that minimizes the error.
		 * Since the bin_width is determined dynamically, simply use a
		 * shift that is about half of the uint32_t size. This should
		 * result in a relatively large multiplier value, which
		 * minimizes error from rounding the multiplier to an integer.
		 * The rounding error only becomes significant if the tick units
		 * are on the order of 1 microsecond. In most systems, it is
		 * expected that the tick units will be relatively low-res,
		 * on the order of 1 millisecond. In such systems the rounding
		 * error is negligible.
		 * It would be more accurate to dynamically try out different
		 * shifts and choose the one that results in the smallest
		 * rounding error, but that extra level of fidelity is
		 * not needed.
		 */
		pdev->tx_delay.hist_internal_bin_width_shift = 16;
		pdev->tx_delay.hist_internal_bin_width_mult =
			((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
			 1000 + (bin_width_1000ticks >> 1)) /
			bin_width_1000ticks;
1518 }
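
	/*
	 * Worked example (editor's illustration; the tick rate and bin
	 * width below are assumed values, not taken from this driver):
	 * with 1 ms ticks and a 10 ms internal bin width,
	 *   bin_width_1000ticks = 10 * 1000 = 10000
	 *   mult = ((1 << 16) * 1000 + 5000) / 10000 = 6554
	 * so a delay of delta ticks lands in bin (delta * 6554) >> 16,
	 * which is within about 0.01% of delta / 10, i.e. the division
	 * by the 10-tick bin width is replaced by a multiply and shift.
	 */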
1519#endif /* QCA_COMPUTE_TX_DELAY */
1520
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001521 /* Thermal Mitigation */
1522 ol_tx_throttle_init(pdev);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001523
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001524 ol_tso_seg_list_init(pdev, desc_pool_size);
Dhanashri Atre83d373d2015-07-28 16:45:59 -07001525
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001526 ol_tx_register_flow_control(pdev);
1527
1528 return 0; /* success */
1529
Leo Chang376398b2015-10-23 14:19:02 -07001530pn_trace_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001531 OL_RX_REORDER_TRACE_DETACH(pdev);
1532
Leo Chang376398b2015-10-23 14:19:02 -07001533reorder_trace_attach_fail:
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301534 qdf_spinlock_destroy(&pdev->tx_mutex);
1535 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1536 qdf_spinlock_destroy(&pdev->rx.mutex);
1537 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001538 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1539
Leo Chang376398b2015-10-23 14:19:02 -07001540control_init_fail:
1541desc_alloc_fail:
1542 for (i = 0; i < fail_idx; i++)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001543 htt_tx_desc_free(pdev->htt_pdev,
Leo Chang376398b2015-10-23 14:19:02 -07001544 (ol_tx_desc_find(pdev, i))->htt_tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001545
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301546 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001547 &pdev->tx_desc.desc_pages, 0, true);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001548
Leo Chang376398b2015-10-23 14:19:02 -07001549page_alloc_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001550 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1551 htt_ipa_uc_detach(pdev->htt_pdev);
Leo Chang376398b2015-10-23 14:19:02 -07001552uc_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001553 htt_detach(pdev->htt_pdev);
1554
Leo Chang376398b2015-10-23 14:19:02 -07001555ol_attach_fail:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001556 return ret; /* fail */
1557}
1558
Dhanashri Atre12a08392016-02-17 13:10:34 -08001559/**
1560 * ol_txrx_pdev_attach_target() - send target configuration
1561 *
1562 * @pdev - the physical device being initialized
1563 *
 1564 * The majority of the data SW setup is done by the pdev_attach
1565 * functions, but this function completes the data SW setup by
1566 * sending datapath configuration messages to the target.
1567 *
 1568 * Return: 0 - success, 1 - failure
1569 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001570A_STATUS ol_txrx_pdev_attach_target(ol_txrx_pdev_handle pdev)
1571{
Dhanashri Atre12a08392016-02-17 13:10:34 -08001572	return htt_attach_target(pdev->htt_pdev) == A_OK ? 0 : 1;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001573}
1574
Dhanashri Atre12a08392016-02-17 13:10:34 -08001575/**
1576 * ol_txrx_pdev_detach() - delete the data SW state
1577 *
1578 * @pdev - the data physical device object being removed
1579 * @force - delete the pdev (and its vdevs and peers) even if
1580 * there are outstanding references by the target to the vdevs
1581 * and peers within the pdev
1582 *
1583 * This function is used when the WLAN driver is being removed to
1584 * remove the host data component within the driver.
1585 * All virtual devices within the physical device need to be deleted
1586 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
1587 *
1588 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001589void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
1590{
1591 int i;
Anurag Chouhan6d760662016-02-20 16:05:43 +05301592 struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
Leo Chang376398b2015-10-23 14:19:02 -07001593
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001594	/* checking to ensure txrx pdev structure is not NULL */
1595 if (!pdev) {
1596 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "NULL pdev passed to %s\n", __func__);
1597 return;
1598 }
1599 /* preconditions */
1600 TXRX_ASSERT2(pdev);
1601
1602 /* check that the pdev has no vdevs allocated */
1603 TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
1604
Komal Seelamc4b28632016-02-03 15:02:18 +05301605 htt_pktlogmod_exit(pdev, osc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001606
Komal Seelamc4b28632016-02-03 15:02:18 +05301607 OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301608
1609 if (pdev->cfg.is_high_latency)
1610 ol_tx_sched_detach(pdev);
1611
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001612#ifdef QCA_SUPPORT_TX_THROTTLE
1613 /* Thermal Mitigation */
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301614 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
1615 qdf_timer_free(&pdev->tx_throttle.phase_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001616#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301617 qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1618 qdf_timer_free(&pdev->tx_throttle.tx_timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001619#endif
1620#endif
Leo Chang376398b2015-10-23 14:19:02 -07001621 ol_tso_seg_list_deinit(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001622
1623 if (force) {
1624 /*
1625 * The assertion above confirms that all vdevs within this pdev
1626 * were detached. However, they may not have actually been
1627 * deleted.
1628 * If the vdev had peers which never received a PEER_UNMAP msg
1629 * from the target, then there are still zombie peer objects,
1630 * and the vdev parents of the zombie peers are also zombies,
1631 * hanging around until their final peer gets deleted.
1632 * Go through the peer hash table and delete any peers left.
1633 * As a side effect, this will complete the deletion of any
1634 * vdevs that are waiting for their peers to finish deletion.
1635 */
1636 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1, "Force delete for pdev %p\n",
1637 pdev);
1638 ol_txrx_peer_find_hash_erase(pdev);
1639 }
1640
Nirav Shah7a0a9052016-04-14 16:52:21 +05301641 ol_tx_deregister_flow_control(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001642 /* Stop the communication between HTT and target at first */
1643 htt_detach_target(pdev->htt_pdev);
1644
1645 for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1646 void *htt_tx_desc;
Leo Chang376398b2015-10-23 14:19:02 -07001647 struct ol_tx_desc_t *tx_desc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001648
Leo Chang376398b2015-10-23 14:19:02 -07001649 tx_desc = ol_tx_desc_find(pdev, i);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001650 /*
1651 * Confirm that each tx descriptor is "empty", i.e. it has
1652 * no tx frame attached.
1653 * In particular, check that there are no frames that have
1654 * been given to the target to transmit, for which the
1655 * target has never provided a response.
1656 */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301657 if (qdf_atomic_read(&tx_desc->ref_cnt)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001658 TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
1659 "Warning: freeing tx frame (no compltn)\n");
1660 ol_tx_desc_frame_free_nonstd(pdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001661 tx_desc, 1);
1662 }
Leo Chang376398b2015-10-23 14:19:02 -07001663 htt_tx_desc = tx_desc->htt_tx_desc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001664 htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
1665 }
1666
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301667 qdf_mem_multi_pages_free(pdev->osdev,
Leo Chang376398b2015-10-23 14:19:02 -07001668 &pdev->tx_desc.desc_pages, 0, true);
1669 pdev->tx_desc.freelist = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001670
1671 /* Detach micro controller data path offload resource */
1672 if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1673 htt_ipa_uc_detach(pdev->htt_pdev);
1674
1675 htt_detach(pdev->htt_pdev);
1676 htt_pdev_free(pdev->htt_pdev);
1677
Nirav Shah76291962016-04-25 10:50:37 +05301678 ol_tx_desc_dup_detect_deinit(pdev);
1679
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001680 ol_txrx_peer_find_detach(pdev);
1681
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301682 qdf_spinlock_destroy(&pdev->tx_mutex);
1683 qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1684 qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
1685 qdf_spinlock_destroy(&pdev->rx.mutex);
Mohit Khanna37ffb292016-08-08 16:20:01 -07001686 qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001687#ifdef QCA_SUPPORT_TX_THROTTLE
1688 /* Thermal Mitigation */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301689 qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001690#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301691
1692 /* TX flow control for peer who is in very bad link status */
1693 ol_tx_badpeer_flow_cl_deinit(pdev);
1694
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001695 OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1696
1697 OL_RX_REORDER_TRACE_DETACH(pdev);
1698 OL_RX_PN_TRACE_DETACH(pdev);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301699
1700 ol_txrx_pdev_txq_log_destroy(pdev);
1701 ol_txrx_pdev_grp_stat_destroy(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001702 /*
1703 * WDI event detach
1704 */
1705 wdi_event_detach(pdev);
1706 ol_txrx_local_peer_id_cleanup(pdev);
1707
1708#ifdef QCA_COMPUTE_TX_DELAY
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301709 qdf_spinlock_destroy(&pdev->tx_delay.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001710#endif
1711}
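
/*
 * Editor's illustrative sketch (not compiled into the driver): the
 * teardown ordering described above - every vdev is detached before
 * the pdev itself is deleted.  The function and variable names here
 * are hypothetical placeholders.
 */
#if 0	/* example only */
static void example_pdev_teardown(ol_txrx_pdev_handle pdev,
				  ol_txrx_vdev_handle *vdevs,
				  int num_vdevs)
{
	int i;

	/* all peers are assumed to have been detached already */
	for (i = 0; i < num_vdevs; i++)
		ol_txrx_vdev_detach(vdevs[i], NULL, NULL);

	/*
	 * Only now is it valid to delete the pdev; passing force = 1
	 * instead would also reap zombie peers/vdevs still waiting on
	 * PEER_UNMAP messages from the target.
	 */
	ol_txrx_pdev_detach(pdev, 0);
}
#endif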
1712
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301713#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
1714
1715/**
1716 * ol_txrx_vdev_tx_desc_cnt_init() - initialise tx descriptor count for vdev
1717 * @vdev: the virtual device object
1718 *
1719 * Return: None
1720 */
1721static inline void
1722ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
1723{
1724 qdf_atomic_init(&vdev->tx_desc_count);
1725}
1726#else
1727
1728static inline void
1729ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
1730{
1731 return;
1732}
1733#endif
1734
Dhanashri Atre12a08392016-02-17 13:10:34 -08001735/**
1736 * ol_txrx_vdev_attach - Allocate and initialize the data object
1737 * for a new virtual device.
1738 *
1739 * @data_pdev - the physical device the virtual device belongs to
1740 * @vdev_mac_addr - the MAC address of the virtual device
1741 * @vdev_id - the ID used to identify the virtual device to the target
1742 * @op_mode - whether this virtual device is operating as an AP,
1743 * an IBSS, or a STA
1744 *
1745 * Return: success: handle to new data vdev object, failure: NULL
1746 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001747ol_txrx_vdev_handle
1748ol_txrx_vdev_attach(ol_txrx_pdev_handle pdev,
1749 uint8_t *vdev_mac_addr,
1750 uint8_t vdev_id, enum wlan_op_mode op_mode)
1751{
1752 struct ol_txrx_vdev_t *vdev;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001753 QDF_STATUS qdf_status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001754
1755 /* preconditions */
1756 TXRX_ASSERT2(pdev);
1757 TXRX_ASSERT2(vdev_mac_addr);
1758
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301759 vdev = qdf_mem_malloc(sizeof(*vdev));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001760 if (!vdev)
1761 return NULL; /* failure */
1762
1763 /* store provided params */
1764 vdev->pdev = pdev;
1765 vdev->vdev_id = vdev_id;
1766 vdev->opmode = op_mode;
1767
1768 vdev->delete.pending = 0;
1769 vdev->safemode = 0;
1770 vdev->drop_unenc = 1;
1771 vdev->num_filters = 0;
Himanshu Agarwal5ac2f7b2016-05-06 20:08:10 +05301772 vdev->fwd_tx_packets = 0;
1773 vdev->fwd_rx_packets = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001774
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301775 ol_txrx_vdev_tx_desc_cnt_init(vdev);
1776
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301777 qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001778 OL_TXRX_MAC_ADDR_LEN);
1779
1780 TAILQ_INIT(&vdev->peer_list);
1781 vdev->last_real_peer = NULL;
1782
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301783 ol_txrx_hl_tdls_flag_reset(vdev, false);
1784
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001785#ifdef QCA_IBSS_SUPPORT
1786 vdev->ibss_peer_num = 0;
1787 vdev->ibss_peer_heart_beat_timer = 0;
1788#endif
1789
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301790 ol_txrx_vdev_txqs_init(vdev);
1791
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301792 qdf_spinlock_create(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001793 vdev->ll_pause.paused_reason = 0;
1794 vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
1795 vdev->ll_pause.txq.depth = 0;
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301796 qdf_timer_init(pdev->osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001797 &vdev->ll_pause.timer,
1798 ol_tx_vdev_ll_pause_queue_send, vdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301799 QDF_TIMER_TYPE_SW);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301800 qdf_atomic_init(&vdev->os_q_paused);
1801 qdf_atomic_set(&vdev->os_q_paused, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001802 vdev->tx_fl_lwm = 0;
1803 vdev->tx_fl_hwm = 0;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001804 vdev->rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001805 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301806 qdf_spinlock_create(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001807 vdev->osif_flow_control_cb = NULL;
1808 vdev->osif_fc_ctx = NULL;
1809
1810 /* Default MAX Q depth for every VDEV */
1811 vdev->ll_pause.max_q_depth =
1812 ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001813 qdf_status = qdf_event_create(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001814 /* add this vdev into the pdev's list */
1815 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
1816
1817 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
1818 "Created vdev %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
1819 vdev,
1820 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
1821 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
1822 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
1823
1824 /*
1825 * We've verified that htt_op_mode == wlan_op_mode,
1826 * so no translation is needed.
1827 */
1828 htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
1829
1830 return vdev;
1831}
1832
Dhanashri Atre12a08392016-02-17 13:10:34 -08001833/**
1834 *ol_txrx_vdev_register - Link a vdev's data object with the
1835 * matching OS shim vdev object.
1836 *
1837 * @txrx_vdev: the virtual device's data object
1838 * @osif_vdev: the virtual device's OS shim object
 1839 * @txrx_ops: (pointers to) functions used for tx and rx data xfer
1840 *
1841 * The data object for a virtual device is created by the
1842 * function ol_txrx_vdev_attach. However, rather than fully
1843 * linking the data vdev object with the vdev objects from the
1844 * other subsystems that the data vdev object interacts with,
1845 * the txrx_vdev_attach function focuses primarily on creating
1846 * the data vdev object. After the creation of both the data
1847 * vdev object and the OS shim vdev object, this
 1848 * ol_txrx_vdev_register function is used to connect the two
1849 * vdev objects, so the data SW can use the OS shim vdev handle
1850 * when passing rx data received by a vdev up to the OS shim.
1851 */
1852void ol_txrx_vdev_register(ol_txrx_vdev_handle vdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001853 void *osif_vdev,
Dhanashri Atre12a08392016-02-17 13:10:34 -08001854 struct ol_txrx_ops *txrx_ops)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001855{
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001856 if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
1857 qdf_print("%s: vdev/txrx_ops is NULL!\n", __func__);
1858 qdf_assert(0);
1859 return;
1860 }
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001861
Dhanashri Atre41c0d282016-06-28 14:09:59 -07001862 vdev->osif_dev = osif_vdev;
Dhanashri Atre182b0272016-02-17 15:35:07 -08001863 vdev->rx = txrx_ops->rx.rx;
Dhanashri Atre168d2b42016-02-22 14:43:06 -08001864 txrx_ops->tx.tx = ol_tx_data;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001865}
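
/*
 * Editor's illustrative sketch (not compiled into the driver): pairing
 * ol_txrx_vdev_attach() with ol_txrx_vdev_register().  example_rx_cb,
 * example_osif_vdev, the vdev id of 0 and the STA opmode are assumed
 * placeholder values, not requirements of the API.
 */
#if 0	/* example only */
static QDF_STATUS example_rx_cb(void *osif_vdev, qdf_nbuf_t rx_buf_list)
{
	/* deliver the rx frames to the OS shim / network stack here */
	return QDF_STATUS_SUCCESS;
}

static ol_txrx_vdev_handle example_vdev_setup(ol_txrx_pdev_handle pdev,
					      void *example_osif_vdev,
					      uint8_t *vdev_mac_addr)
{
	struct ol_txrx_ops txrx_ops = { 0 };
	ol_txrx_vdev_handle vdev;

	/* create the data vdev object */
	vdev = ol_txrx_vdev_attach(pdev, vdev_mac_addr, 0, wlan_op_mode_sta);
	if (!vdev)
		return NULL;

	/*
	 * Link the data vdev with the OS shim object.  The caller
	 * supplies its rx handler; the data SW fills in txrx_ops.tx.tx
	 * with its transmit entry point (ol_tx_data in this file).
	 */
	txrx_ops.rx.rx = example_rx_cb;
	ol_txrx_vdev_register(vdev, example_osif_vdev, &txrx_ops);

	return vdev;
}
#endif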
1866
Dhanashri Atre12a08392016-02-17 13:10:34 -08001867/**
1868 * ol_txrx_set_curchan - Setup the current operating channel of
1869 * the device
1870 * @pdev - the data physical device object
 1871 * @chan_mhz - the channel frequency (MHz) that packets are on
1872 *
1873 * Mainly used when populating monitor mode status that requires
1874 * the current operating channel
1875 *
1876 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001877void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz)
1878{
1879 return;
1880}
1881
1882void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
1883{
1884 vdev->safemode = val;
1885}
1886
Dhanashri Atre12a08392016-02-17 13:10:34 -08001887/**
1888 * ol_txrx_set_privacy_filters - set the privacy filter
1889 * @vdev - the data virtual device object
1890 * @filter - filters to be set
1891 * @num - the number of filters
1892 *
 1893 * Rx related. Set the privacy filters. When receiving packets, check
 1894 * the ether type, filter type and packet type to decide whether to
 1895 * discard these packets.
1896 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001897void
1898ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
1899 void *filters, uint32_t num)
1900{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301901 qdf_mem_copy(vdev->privacy_filters, filters,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001902 num * sizeof(struct privacy_exemption));
1903 vdev->num_filters = num;
1904}
1905
1906void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
1907{
1908 vdev->drop_unenc = val;
1909}
1910
Dhanashri Atre12a08392016-02-17 13:10:34 -08001911/**
1912 * ol_txrx_vdev_detach - Deallocate the specified data virtual
1913 * device object.
1914 * @data_vdev: data object for the virtual device in question
1915 * @callback: function to call (if non-NULL) once the vdev has
1916 * been wholly deleted
1917 * @callback_context: context to provide in the callback
1918 *
1919 * All peers associated with the virtual device need to be deleted
1920 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
1921 * However, for the peers to be fully deleted, the peer deletion has to
1922 * percolate through the target data FW and back up to the host data SW.
1923 * Thus, even though the host control SW may have issued a peer_detach
1924 * call for each of the vdev's peers, the peer objects may still be
1925 * allocated, pending removal of all references to them by the target FW.
1926 * In this case, though the vdev_detach function call will still return
1927 * immediately, the vdev itself won't actually be deleted, until the
1928 * deletions of all its peers complete.
1929 * The caller can provide a callback function pointer to be notified when
1930 * the vdev deletion actually happens - whether it's directly within the
1931 * vdev_detach call, or if it's deferred until all in-progress peer
1932 * deletions have completed.
1933 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001934void
1935ol_txrx_vdev_detach(ol_txrx_vdev_handle vdev,
1936 ol_txrx_vdev_delete_cb callback, void *context)
1937{
1938 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1939
1940 /* preconditions */
1941 TXRX_ASSERT2(vdev);
1942
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301943 ol_txrx_vdev_tx_queue_free(vdev);
1944
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301945 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Anurag Chouhan754fbd82016-02-19 17:00:08 +05301946 qdf_timer_stop(&vdev->ll_pause.timer);
1947 qdf_timer_free(&vdev->ll_pause.timer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001948 vdev->ll_pause.is_q_timer_on = false;
1949 while (vdev->ll_pause.txq.head) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05301950 qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
1951 qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
1952 qdf_nbuf_unmap(pdev->osdev, vdev->ll_pause.txq.head,
Anurag Chouhandf2b2682016-02-29 14:15:27 +05301953 QDF_DMA_TO_DEVICE);
Nirav Shahcbc6d722016-03-01 16:24:53 +05301954 qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001955 vdev->ll_pause.txq.head = next;
1956 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301957 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
1958 qdf_spinlock_destroy(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001959
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301960 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001961 vdev->osif_flow_control_cb = NULL;
1962 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301963 qdf_spin_unlock_bh(&vdev->flow_control_lock);
1964 qdf_spinlock_destroy(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001965
1966 /* remove the vdev from its parent pdev's list */
1967 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
1968
1969 /*
1970 * Use peer_ref_mutex while accessing peer_list, in case
1971 * a peer is in the process of being removed from the list.
1972 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301973 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001974 /* check that the vdev has no peers allocated */
1975 if (!TAILQ_EMPTY(&vdev->peer_list)) {
1976 /* debug print - will be removed later */
1977 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301978 "%s: not deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001979 __func__, vdev,
1980 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
1981 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
1982 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
1983 /* indicate that the vdev needs to be deleted */
1984 vdev->delete.pending = 1;
1985 vdev->delete.callback = callback;
1986 vdev->delete.context = context;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301987 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001988 return;
1989 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301990 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07001991 qdf_event_destroy(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001992
1993 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
1994 "%s: deleting vdev obj %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
1995 __func__, vdev,
1996 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
1997 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
1998 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
1999
2000 htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
2001
2002 /*
2003 * Doesn't matter if there are outstanding tx frames -
2004 * they will be freed once the target sends a tx completion
2005 * message for them.
2006 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302007 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002008 if (callback)
2009 callback(context);
2010}
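
/*
 * Editor's illustrative sketch (not compiled into the driver): using
 * the vdev delete callback described above.  If peers are still
 * pending deletion, ol_txrx_vdev_detach() returns immediately and the
 * callback fires only once the last peer is gone.  The helper and
 * variable names are hypothetical.
 */
#if 0	/* example only */
static void example_vdev_deleted(void *context)
{
	/* all peers are gone and the vdev memory has been freed */
	bool *done = context;

	*done = true;
}

static void example_vdev_teardown(ol_txrx_vdev_handle vdev, bool *done)
{
	*done = false;
	/* peers should already have been detached */
	ol_txrx_vdev_detach(vdev, example_vdev_deleted, done);
	/*
	 * If *done is still false here, deletion was deferred until the
	 * final peer unmap arrives from the target.
	 */
}
#endif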
2011
2012/**
2013 * ol_txrx_flush_rx_frames() - flush cached rx frames
2014 * @peer: peer
2015 * @drop: set flag to drop frames
2016 *
2017 * Return: None
2018 */
2019void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302020 bool drop)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002021{
2022 struct ol_rx_cached_buf *cache_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302023 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08002024 ol_txrx_rx_fp data_rx = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002025
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302026 if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
2027 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002028 return;
2029 }
2030
Dhanashri Atre182b0272016-02-17 15:35:07 -08002031 qdf_assert(peer->vdev);
2032
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302033 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre182b0272016-02-17 15:35:07 -08002034
Dhanashri Atre50141c52016-04-07 13:15:29 -07002035 if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
Dhanashri Atre182b0272016-02-17 15:35:07 -08002036 data_rx = peer->vdev->rx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002037 else
2038 drop = true;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302039 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002040
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302041 qdf_spin_lock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002042 cache_buf = list_entry((&peer->cached_bufq)->next,
2043 typeof(*cache_buf), list);
2044 while (!list_empty(&peer->cached_bufq)) {
2045 list_del(&cache_buf->list);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302046 qdf_spin_unlock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002047 if (drop) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302048 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002049 } else {
2050 /* Flush the cached frames to HDD */
Dhanashri Atre182b0272016-02-17 15:35:07 -08002051 ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302052 if (ret != QDF_STATUS_SUCCESS)
Nirav Shahcbc6d722016-03-01 16:24:53 +05302053 qdf_nbuf_free(cache_buf->buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002054 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302055 qdf_mem_free(cache_buf);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302056 qdf_spin_lock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002057 cache_buf = list_entry((&peer->cached_bufq)->next,
2058 typeof(*cache_buf), list);
2059 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302060 qdf_spin_unlock_bh(&peer->bufq_lock);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302061 qdf_atomic_dec(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002062}
2063
Dhanashri Atre12a08392016-02-17 13:10:34 -08002064/**
2065 * ol_txrx_peer_attach - Allocate and set up references for a
2066 * data peer object.
2067 * @data_pdev: data physical device object that will indirectly
2068 * own the data_peer object
2069 * @data_vdev - data virtual device object that will directly
2070 * own the data_peer object
2071 * @peer_mac_addr - MAC address of the new peer
2072 *
2073 * When an association with a peer starts, the host's control SW
2074 * uses this function to inform the host data SW.
2075 * The host data SW allocates its own peer object, and stores a
2076 * reference to the control peer object within the data peer object.
2077 * The host data SW also stores a reference to the virtual device
2078 * that the peer is associated with. This virtual device handle is
2079 * used when the data SW delivers rx data frames to the OS shim layer.
2080 * The host data SW returns a handle to the new peer data object,
2081 * so a reference within the control peer object can be set to the
2082 * data peer object.
2083 *
2084 * Return: handle to new data peer object, or NULL if the attach
2085 * fails
2086 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002087ol_txrx_peer_handle
Dhanashri Atre12a08392016-02-17 13:10:34 -08002088ol_txrx_peer_attach(ol_txrx_vdev_handle vdev, uint8_t *peer_mac_addr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002089{
2090 struct ol_txrx_peer_t *peer;
2091 struct ol_txrx_peer_t *temp_peer;
2092 uint8_t i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002093 bool wait_on_deletion = false;
2094 unsigned long rc;
Dhanashri Atre12a08392016-02-17 13:10:34 -08002095 struct ol_txrx_pdev_t *pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002096
2097 /* preconditions */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002098 TXRX_ASSERT2(vdev);
2099 TXRX_ASSERT2(peer_mac_addr);
2100
Dhanashri Atre12a08392016-02-17 13:10:34 -08002101 pdev = vdev->pdev;
2102 TXRX_ASSERT2(pdev);
2103
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302104 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002105	/* check for duplicate existing peer */
2106 TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2107 if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
2108 (union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
2109 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
2110 "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exsist.\n",
2111 vdev->vdev_id,
2112 peer_mac_addr[0], peer_mac_addr[1],
2113 peer_mac_addr[2], peer_mac_addr[3],
2114 peer_mac_addr[4], peer_mac_addr[5]);
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302115 if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002116 vdev->wait_on_peer_id = temp_peer->local_id;
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002117 qdf_event_reset(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002118 wait_on_deletion = true;
2119 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302120 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002121 return NULL;
2122 }
2123 }
2124 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302125 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002126
2127 if (wait_on_deletion) {
2128 /* wait for peer deletion */
Anurag Chouhance0dc992016-02-16 18:18:03 +05302129 rc = qdf_wait_single_event(&vdev->wait_delete_comp,
Prakash Manjunathappad3ccca22016-05-05 19:23:19 -07002130 PEER_DELETION_TIMEOUT);
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002131 if (QDF_STATUS_SUCCESS != rc) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002132 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Deepak Dhamdhere561cdb92016-09-02 20:12:58 -07002133 "error waiting for peer(%d) deletion, status %d\n",
2134 vdev->wait_on_peer_id, (int) rc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002135 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2136 return NULL;
2137 }
2138 }
2139
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302140 peer = qdf_mem_malloc(sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002141 if (!peer)
2142 return NULL; /* failure */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302143 qdf_mem_zero(peer, sizeof(*peer));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002144
2145 /* store provided params */
2146 peer->vdev = vdev;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302147 qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002148 OL_TXRX_MAC_ADDR_LEN);
2149
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302150 ol_txrx_peer_txqs_init(pdev, peer);
2151
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002152 INIT_LIST_HEAD(&peer->cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302153 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002154 /* add this peer into the vdev's list */
2155 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302156 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002157 /* check whether this is a real peer (peer mac addr != vdev mac addr) */
2158 if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr))
2159 vdev->last_real_peer = peer;
2160
2161 peer->rx_opt_proc = pdev->rx_opt_proc;
2162
2163 ol_rx_peer_init(pdev, peer);
2164
2165 /* initialize the peer_id */
2166 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2167 peer->peer_ids[i] = HTT_INVALID_PEER;
2168
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302169 qdf_spinlock_create(&peer->peer_info_lock);
2170 qdf_spinlock_create(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002171
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302172 qdf_atomic_init(&peer->delete_in_progress);
2173 qdf_atomic_init(&peer->flush_in_progress);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002174
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302175 qdf_atomic_init(&peer->ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002176
2177 /* keep one reference for attach */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302178 qdf_atomic_inc(&peer->ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002179
2180 /* keep one reference for ol_rx_peer_map_handler */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302181 qdf_atomic_inc(&peer->ref_cnt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002182
2183 peer->valid = 1;
2184
2185 ol_txrx_peer_find_hash_add(pdev, peer);
2186
Mohit Khanna47384bc2016-08-15 15:37:05 -07002187 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2188 "vdev %p created peer %p ref_cnt %d (%02x:%02x:%02x:%02x:%02x:%02x)\n",
2189 vdev, peer, qdf_atomic_read(&peer->ref_cnt),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002190 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2191 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2192 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
2193 /*
 2194	 * For every peer MAP message, search and set if bss_peer
2195 */
Ankit Guptaa5076012016-09-14 11:32:19 -07002196 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
2197 OL_TXRX_MAC_ADDR_LEN))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002198 peer->bss_peer = 1;
2199
2200 /*
2201 * The peer starts in the "disc" state while association is in progress.
2202 * Once association completes, the peer will get updated to "auth" state
2203 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2204 * or else to the "conn" state. For non-open mode, the peer will
2205 * progress to "auth" state once the authentication completes.
2206 */
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002207 peer->state = OL_TXRX_PEER_STATE_INVALID;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002208 ol_txrx_peer_state_update(pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002209 OL_TXRX_PEER_STATE_DISC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002210
2211#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2212 peer->rssi_dbm = HTT_RSSI_INVALID;
2213#endif
Manjunathappa Prakashb7573722016-04-21 11:24:07 -07002214 if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2215 !pdev->self_peer) {
2216 pdev->self_peer = peer;
2217 /*
2218 * No Tx in monitor mode, otherwise results in target assert.
2219 * Setting disable_intrabss_fwd to true
2220 */
2221 ol_vdev_rx_set_intrabss_fwd(vdev, true);
2222 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002223
2224 ol_txrx_local_peer_id_alloc(pdev, peer);
2225
2226 return peer;
2227}
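
/*
 * Editor's illustrative sketch (not compiled into the driver): how the
 * control path might use ol_txrx_peer_attach() at association time for
 * an open (unsecured) BSS.  The helper name is a hypothetical
 * placeholder.
 */
#if 0	/* example only */
static ol_txrx_peer_handle example_peer_assoc(ol_txrx_vdev_handle vdev,
					      uint8_t *peer_mac)
{
	ol_txrx_peer_handle peer;

	/* the new peer starts in the "disc" state */
	peer = ol_txrx_peer_attach(vdev, peer_mac);
	if (!peer)
		return NULL;	/* duplicate peer or allocation failure */

	/*
	 * Open mode: there is no handshake to wait for, so the peer can
	 * move straight to "auth", which selects the pass-through
	 * tx filter.
	 */
	ol_txrx_peer_state_update(vdev->pdev, peer_mac,
				  OL_TXRX_PEER_STATE_AUTH);

	return peer;
}
#endif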
2228
2229/*
2230 * Discarding tx filter - removes all data frames (disconnected state)
2231 */
2232static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2233{
2234 return A_ERROR;
2235}
2236
2237/*
 2238 * Non-authentication tx filter - filters out data frames that are not
2239 * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2240 * data frames (connected state)
2241 */
2242static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2243{
2244 return
2245 (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2246 tx_msdu_info->htt.info.ethertype ==
2247 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2248}
2249
2250/*
2251 * Pass-through tx filter - lets all data frames through (authenticated state)
2252 */
2253static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2254{
2255 return A_OK;
2256}
2257
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002258/**
2259 * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2260 * @peer: handle to peer
2261 *
 2262 * returns the mac address for modules which do not know the peer type
2263 *
2264 * Return: the mac_addr from peer
2265 */
2266uint8_t *
2267ol_txrx_peer_get_peer_mac_addr(ol_txrx_peer_handle peer)
2268{
2269 if (!peer)
2270 return NULL;
2271
2272 return peer->mac_addr.raw;
2273}
2274
2275/**
2276 * ol_txrx_get_pn_info() - Returns pn info from peer
2277 * @peer: handle to peer
2278 * @last_pn_valid: return last_rmf_pn_valid value from peer.
2279 * @last_pn: return last_rmf_pn value from peer.
2280 * @rmf_pn_replays: return rmf_pn_replays value from peer.
2281 *
2282 * Return: NONE
2283 */
2284void
2285ol_txrx_get_pn_info(ol_txrx_peer_handle peer, uint8_t **last_pn_valid,
2286 uint64_t **last_pn, uint32_t **rmf_pn_replays)
2287{
2288 *last_pn_valid = &peer->last_rmf_pn_valid;
2289 *last_pn = &peer->last_rmf_pn;
2290 *rmf_pn_replays = &peer->rmf_pn_replays;
2291}
2292
2293/**
2294 * ol_txrx_get_opmode() - Return operation mode of vdev
2295 * @vdev: vdev handle
2296 *
2297 * Return: operation mode.
2298 */
2299int ol_txrx_get_opmode(ol_txrx_vdev_handle vdev)
2300{
2301 return vdev->opmode;
2302}
2303
2304/**
2305 * ol_txrx_get_peer_state() - Return peer state of peer
2306 * @peer: peer handle
2307 *
2308 * Return: return peer state
2309 */
2310int ol_txrx_get_peer_state(ol_txrx_peer_handle peer)
2311{
2312 return peer->state;
2313}
2314
2315/**
2316 * ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
2317 * @peer: peer handle
2318 *
2319 * Return: vdev handle from peer
2320 */
2321ol_txrx_vdev_handle
2322ol_txrx_get_vdev_for_peer(ol_txrx_peer_handle peer)
2323{
2324 return peer->vdev;
2325}
2326
2327/**
2328 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
2329 * @vdev: vdev handle
2330 *
2331 * Return: vdev mac address
2332 */
2333uint8_t *
2334ol_txrx_get_vdev_mac_addr(ol_txrx_vdev_handle vdev)
2335{
2336 if (!vdev)
2337 return NULL;
2338
2339 return vdev->mac_addr.raw;
2340}
2341
2342/**
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002343 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002344 * vdev
2345 * @vdev: vdev handle
2346 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002347 * Return: Handle to struct qdf_mac_addr
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002348 */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002349struct qdf_mac_addr *
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002350ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
2351{
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002352 return (struct qdf_mac_addr *)&(vdev->mac_addr);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002353}
2354
2355/**
2356 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
2357 * @vdev: vdev handle
2358 *
2359 * Return: Handle to pdev
2360 */
2361ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
2362{
2363 return vdev->pdev;
2364}
2365
2366/**
2367 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
2368 * @vdev: vdev handle
2369 *
2370 * Return: Handle to control pdev
2371 */
2372ol_pdev_handle
2373ol_txrx_get_ctrl_pdev_from_vdev(ol_txrx_vdev_handle vdev)
2374{
2375 return vdev->pdev->ctrl_pdev;
2376}
2377
2378/**
2379 * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
2380 * @vdev: vdev handle
2381 *
2382 * Return: Rx Fwd disabled status
2383 */
2384uint8_t
2385ol_txrx_is_rx_fwd_disabled(ol_txrx_vdev_handle vdev)
2386{
2387 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
2388 vdev->pdev->ctrl_pdev;
2389 return cfg->rx_fwd_disabled;
2390}
2391
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002392#ifdef QCA_IBSS_SUPPORT
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002393/**
2394 * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
2395 * @vdev: vdev handle
2396 * @peer_num_delta: peer nums to be adjusted
2397 *
2398 * Return: -1 for failure or total peer nums after adjustment.
2399 */
2400int16_t
2401ol_txrx_update_ibss_add_peer_num_of_vdev(ol_txrx_vdev_handle vdev,
2402 int16_t peer_num_delta)
2403{
2404 int16_t new_peer_num;
2405
2406 new_peer_num = vdev->ibss_peer_num + peer_num_delta;
Naveen Rawatc45d1622016-07-05 12:20:09 -07002407 if (new_peer_num > MAX_PEERS || new_peer_num < 0)
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002408 return OL_TXRX_INVALID_NUM_PEERS;
2409
2410 vdev->ibss_peer_num = new_peer_num;
2411
2412 return new_peer_num;
2413}
2414
2415/**
2416 * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
2417 * beat timer
2418 * @vdev: vdev handle
2419 * @timer_value_sec: new heart beat timer value
2420 *
2421 * Return: Old timer value set in vdev.
2422 */
2423uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(ol_txrx_vdev_handle vdev,
2424 uint16_t timer_value_sec)
2425{
2426 uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
2427
2428 vdev->ibss_peer_heart_beat_timer = timer_value_sec;
2429
2430 return old_timer_value;
2431}
Mahesh Kumar Kalikot Veetil32e4fc72016-09-09 17:05:22 -07002432#endif
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002433
2434/**
2435 * ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
2436 * @vdev: vdev handle
2437 * @callback: callback function to remove the peer.
2438 * @callback_context: handle for callback function
 2439 * @remove_last_peer: whether the last (bss) peer should also be removed
2440 *
2441 * Return: NONE
2442 */
2443void
2444ol_txrx_remove_peers_for_vdev(ol_txrx_vdev_handle vdev,
2445 ol_txrx_vdev_peer_remove_cb callback,
2446 void *callback_context, bool remove_last_peer)
2447{
2448 ol_txrx_peer_handle peer, temp;
2449 /* remove all remote peers for vdev */
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002450 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002451
2452 temp = NULL;
2453 TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
2454 peer_list_elem) {
2455 if (temp) {
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002456 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
2457 if (qdf_atomic_read(&temp->delete_in_progress) == 0) {
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002458 callback(callback_context, temp->mac_addr.raw,
2459 vdev->vdev_id, temp, false);
2460 }
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002461 qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002462 }
2463 /* self peer is deleted last */
2464 if (peer == TAILQ_FIRST(&vdev->peer_list)) {
2465 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
2466 "%s: self peer removed by caller ",
2467 __func__);
2468 break;
2469 } else
2470 temp = peer;
2471 }
2472
Mohit Khanna137b97d2016-04-21 16:11:33 -07002473 qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
2474
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002475 if (remove_last_peer) {
2476 /* remove IBSS bss peer last */
2477 peer = TAILQ_FIRST(&vdev->peer_list);
2478 callback(callback_context, (uint8_t *) &vdev->mac_addr,
2479 vdev->vdev_id, peer, false);
2480 }
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002481}
2482
2483/**
2484 * ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
2485 * @vdev: vdev handle
2486 * @callback: callback function to remove the peer.
2487 * @callback_context: handle for callback function
2488 *
2489 * Return: NONE
2490 */
2491void
2492ol_txrx_remove_peers_for_vdev_no_lock(ol_txrx_vdev_handle vdev,
2493 ol_txrx_vdev_peer_remove_cb callback,
2494 void *callback_context)
2495{
2496 ol_txrx_peer_handle peer = NULL;
2497
2498 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2499 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
2500 "%s: peer found for vdev id %d. deleting the peer",
2501 __func__, vdev->vdev_id);
2502 callback(callback_context, (uint8_t *)&vdev->mac_addr,
2503 vdev->vdev_id, peer, false);
2504 }
2505}
2506
2507/**
2508 * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
2509 * @vdev: vdev handle
2510 * @ocb_set_chan: OCB channel information to be set in vdev.
2511 *
2512 * Return: NONE
2513 */
2514void ol_txrx_set_ocb_chan_info(ol_txrx_vdev_handle vdev,
2515 struct ol_txrx_ocb_set_chan ocb_set_chan)
2516{
2517 vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
2518 vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
2519}
2520
2521/**
2522 * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
2523 * @vdev: vdev handle
2524 *
2525 * Return: handle to struct ol_txrx_ocb_chan_info
2526 */
2527struct ol_txrx_ocb_chan_info *
2528ol_txrx_get_ocb_chan_info(ol_txrx_vdev_handle vdev)
2529{
2530 return vdev->ocb_channel_info;
2531}
2532
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002533/**
2534 * @brief specify the peer's authentication state
2535 * @details
2536 * Specify the peer's authentication state (none, connected, authenticated)
2537 * to allow the data SW to determine whether to filter out invalid data frames.
2538 * (In the "connected" state, where security is enabled, but authentication
2539 * has not completed, tx and rx data frames other than EAPOL or WAPI should
2540 * be discarded.)
2541 * This function is only relevant for systems in which the tx and rx filtering
2542 * are done in the host rather than in the target.
2543 *
2544 * @param data_peer - which peer has changed its state
2545 * @param state - the new state of the peer
2546 *
Manjunathappa Prakash2593a642016-04-01 08:53:35 -07002547 * Return: QDF Status
Manjunathappa Prakash3454fd62016-04-01 08:52:06 -07002548 */
2549QDF_STATUS ol_txrx_peer_state_update(struct ol_txrx_pdev_t *pdev,
2550 uint8_t *peer_mac,
2551 enum ol_txrx_peer_state state)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002552{
2553 struct ol_txrx_peer_t *peer;
2554
Anurag Chouhanc5548422016-02-24 18:33:27 +05302555 if (qdf_unlikely(!pdev)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002556 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Pdev is NULL");
Anurag Chouhanc5548422016-02-24 18:33:27 +05302557 qdf_assert(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302558 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002559 }
2560
2561 peer = ol_txrx_peer_find_hash_find(pdev, peer_mac, 0, 1);
2562 if (NULL == peer) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302563 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
2564 "%s: peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
2565 __func__,
2566 peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
2567 peer_mac[4], peer_mac[5]);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302568 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002569 }
2570
2571 /* TODO: Should we send WMI command of the connection state? */
2572 /* avoid multiple auth state change. */
2573 if (peer->state == state) {
2574#ifdef TXRX_PRINT_VERBOSE_ENABLE
2575 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO3,
2576 "%s: no state change, returns directly\n",
2577 __func__);
2578#endif
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302579 qdf_atomic_dec(&peer->ref_cnt);
Mohit Khanna47384bc2016-08-15 15:37:05 -07002580 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2581 "%s: peer %p peer->ref_cnt %d", __func__, peer,
2582 qdf_atomic_read(&peer->ref_cnt));
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302583 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002584 }
2585
2586 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "%s: change from %d to %d\n",
2587 __func__, peer->state, state);
2588
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002589 peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002590 ? ol_tx_filter_pass_thru
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002591 : ((state == OL_TXRX_PEER_STATE_CONN)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002592 ? ol_tx_filter_non_auth
2593 : ol_tx_filter_discard);
2594
2595 if (peer->vdev->pdev->cfg.host_addba) {
Dhanashri Atreb08959a2016-03-01 17:28:03 -08002596 if (state == OL_TXRX_PEER_STATE_AUTH) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002597 int tid;
2598 /*
2599 * Pause all regular (non-extended) TID tx queues until
2600 * data arrives and ADDBA negotiation has completed.
2601 */
2602 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
2603 "%s: pause peer and unpause mgmt/non-qos\n",
2604 __func__);
2605 ol_txrx_peer_pause(peer); /* pause all tx queues */
2606 /* unpause mgmt and non-QoS tx queues */
2607 for (tid = OL_TX_NUM_QOS_TIDS;
2608 tid < OL_TX_NUM_TIDS; tid++)
2609 ol_txrx_peer_tid_unpause(peer, tid);
2610 }
2611 }
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302612 qdf_atomic_dec(&peer->ref_cnt);
Mohit Khanna47384bc2016-08-15 15:37:05 -07002613 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2614 "%s: peer %p peer->ref_cnt %d", __func__, peer,
2615 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002616	/* Set the state after the Pause to avoid the race condition
2617 with ADDBA check in tx path */
2618 peer->state = state;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302619 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002620}
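
/*
 * Editor's illustrative sketch (not compiled into the driver): the
 * state sequence for a secured connection and its effect on the tx
 * filters selected above.  The helper name is hypothetical.
 */
#if 0	/* example only */
static void example_secure_assoc_states(struct ol_txrx_pdev_t *pdev,
					uint8_t *peer_mac)
{
	/*
	 * "conn": security is enabled but the handshake has not
	 * finished, so ol_tx_filter_non_auth lets only EAPOL/WAPI
	 * frames through.
	 */
	ol_txrx_peer_state_update(pdev, peer_mac, OL_TXRX_PEER_STATE_CONN);

	/* ...EAPOL / WAPI handshake runs here... */

	/*
	 * "auth": handshake complete, ol_tx_filter_pass_thru now lets
	 * all data frames through.
	 */
	ol_txrx_peer_state_update(pdev, peer_mac, OL_TXRX_PEER_STATE_AUTH);
}
#endif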
2621
2622void
2623ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
2624{
2625 peer->keyinstalled = val;
2626}
2627
2628void
2629ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
2630 uint8_t *peer_mac,
2631 union ol_txrx_peer_update_param_t *param,
2632 enum ol_txrx_peer_update_select_t select)
2633{
2634 struct ol_txrx_peer_t *peer;
2635
2636 peer = ol_txrx_peer_find_hash_find(vdev->pdev, peer_mac, 0, 1);
2637 if (!peer) {
2638 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "%s: peer is null",
2639 __func__);
2640 return;
2641 }
2642
2643 switch (select) {
2644 case ol_txrx_peer_update_qos_capable:
2645 {
 2646		/* save qos_capable here in the txrx peer,
 2647		 * when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO comes, then save it.
2648 */
2649 peer->qos_capable = param->qos_capable;
2650 /*
2651 * The following function call assumes that the peer has a
2652 * single ID. This is currently true, and
2653 * is expected to remain true.
2654 */
2655 htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
2656 peer->peer_ids[0],
2657 peer->qos_capable);
2658 break;
2659 }
2660 case ol_txrx_peer_update_uapsdMask:
2661 {
2662 peer->uapsd_mask = param->uapsd_mask;
2663 htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
2664 peer->peer_ids[0],
2665 peer->uapsd_mask);
2666 break;
2667 }
2668 case ol_txrx_peer_update_peer_security:
2669 {
2670 enum ol_sec_type sec_type = param->sec_type;
2671 enum htt_sec_type peer_sec_type = htt_sec_type_none;
2672
2673 switch (sec_type) {
2674 case ol_sec_type_none:
2675 peer_sec_type = htt_sec_type_none;
2676 break;
2677 case ol_sec_type_wep128:
2678 peer_sec_type = htt_sec_type_wep128;
2679 break;
2680 case ol_sec_type_wep104:
2681 peer_sec_type = htt_sec_type_wep104;
2682 break;
2683 case ol_sec_type_wep40:
2684 peer_sec_type = htt_sec_type_wep40;
2685 break;
2686 case ol_sec_type_tkip:
2687 peer_sec_type = htt_sec_type_tkip;
2688 break;
2689 case ol_sec_type_tkip_nomic:
2690 peer_sec_type = htt_sec_type_tkip_nomic;
2691 break;
2692 case ol_sec_type_aes_ccmp:
2693 peer_sec_type = htt_sec_type_aes_ccmp;
2694 break;
2695 case ol_sec_type_wapi:
2696 peer_sec_type = htt_sec_type_wapi;
2697 break;
2698 default:
2699 peer_sec_type = htt_sec_type_none;
2700 break;
2701 }
2702
2703 peer->security[txrx_sec_ucast].sec_type =
2704 peer->security[txrx_sec_mcast].sec_type =
2705 peer_sec_type;
2706
2707 break;
2708 }
2709 default:
2710 {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05302711 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002712 "ERROR: unknown param %d in %s", select,
2713 __func__);
2714 break;
2715 }
2716 }
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302717 qdf_atomic_dec(&peer->ref_cnt);
Mohit Khanna47384bc2016-08-15 15:37:05 -07002718 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2719 "%s: peer %p peer->ref_cnt %d", __func__, peer,
2720 qdf_atomic_read(&peer->ref_cnt));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002721}
2722
2723uint8_t
2724ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
2725{
2726
2727 struct ol_txrx_peer_t *peer;
2728 peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
2729 if (peer)
2730 return peer->uapsd_mask;
2731 return 0;
2732}
2733
2734uint8_t
2735ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
2736{
2737
2738 struct ol_txrx_peer_t *peer_t =
2739 ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
2740 if (peer_t != NULL)
2741 return peer_t->qos_capable;
2742 return 0;
2743}
2744
2745void ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer)
2746{
2747 struct ol_txrx_vdev_t *vdev;
2748 struct ol_txrx_pdev_t *pdev;
2749 int i;
2750
2751 /* preconditions */
2752 TXRX_ASSERT2(peer);
2753
2754 vdev = peer->vdev;
2755 if (NULL == vdev) {
2756 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
2757 "The vdev is not present anymore\n");
2758 return;
2759 }
2760
2761 pdev = vdev->pdev;
2762 if (NULL == pdev) {
2763 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
2764 "The pdev is not present anymore\n");
2765 return;
2766 }
2767
2768 /*
2769 * Check for the reference count before deleting the peer
 2770	 * as we noticed that sometimes we re-enter this
 2771	 * function, which leads to a dead-lock.
2772 * (A double-free should never happen, so assert if it does.)
2773 */
2774
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302775 if (0 == qdf_atomic_read(&(peer->ref_cnt))) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002776 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
2777 "The Peer is not present anymore\n");
Anurag Chouhanc5548422016-02-24 18:33:27 +05302778 qdf_assert(0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002779 return;
2780 }
2781
2782 /*
2783 * Hold the lock all the way from checking if the peer ref count
2784 * is zero until the peer references are removed from the hash
2785 * table and vdev list (if the peer ref count is zero).
2786 * This protects against a new HL tx operation starting to use the
2787 * peer object just after this function concludes it's done being used.
2788 * Furthermore, the lock needs to be held while checking whether the
2789 * vdev's list of peers is empty, to make sure that list is not modified
2790 * concurrently with the empty check.
2791 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302792 qdf_spin_lock_bh(&pdev->peer_ref_mutex);
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07002793 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002794 u_int16_t peer_id;
2795
2796 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07002797 "Deleting peer %p (%02x:%02x:%02x:%02x:%02x:%02x)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002798 peer,
2799 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2800 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
Deepak Dhamdherec47cfe82016-08-22 01:00:13 -07002801 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002802
2803 peer_id = peer->local_id;
2804 /* remove the reference to the peer from the hash table */
2805 ol_txrx_peer_find_hash_remove(pdev, peer);
2806
2807 /* remove the peer from its parent vdev's list */
2808 TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
2809
2810 /* cleanup the Rx reorder queues for this peer */
2811 ol_rx_peer_cleanup(vdev, peer);
2812
2813 /* peer is removed from peer_list */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302814 qdf_atomic_set(&peer->delete_in_progress, 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002815
2816 /*
2817 * Set wait_delete_comp event if the current peer id matches
2818 * with registered peer id.
2819 */
2820 if (peer_id == vdev->wait_on_peer_id) {
Anurag Chouhance0dc992016-02-16 18:18:03 +05302821 qdf_event_set(&vdev->wait_delete_comp);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002822 vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2823 }
2824
2825 /* check whether the parent vdev has no peers left */
2826 if (TAILQ_EMPTY(&vdev->peer_list)) {
2827 /*
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002828 * Check if the parent vdev was waiting for its peers
2829 * to be deleted, in order for it to be deleted too.
2830 */
2831 if (vdev->delete.pending) {
2832 ol_txrx_vdev_delete_cb vdev_delete_cb =
2833 vdev->delete.callback;
2834 void *vdev_delete_context =
2835 vdev->delete.context;
2836
Himanshu Agarwal31f28562015-12-11 10:35:10 +05302837 /*
2838 * Now that there are no references to the peer,
2839 * we can release the peer reference lock.
2840 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302841 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05302842
Mohit Khanna3aee1312016-07-28 19:07:05 -07002843 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002844 "%s: deleting vdev object %p "
2845 "(%02x:%02x:%02x:%02x:%02x:%02x)"
Mohit Khanna47384bc2016-08-15 15:37:05 -07002846 " - its last peer is done",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002847 __func__, vdev,
2848 vdev->mac_addr.raw[0],
2849 vdev->mac_addr.raw[1],
2850 vdev->mac_addr.raw[2],
2851 vdev->mac_addr.raw[3],
2852 vdev->mac_addr.raw[4],
2853 vdev->mac_addr.raw[5]);
2854 /* all peers are gone, go ahead and delete it */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302855 qdf_mem_free(vdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002856 if (vdev_delete_cb)
2857 vdev_delete_cb(vdev_delete_context);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05302858 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302859 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002860 }
Himanshu Agarwal31f28562015-12-11 10:35:10 +05302861 } else {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302862 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Himanshu Agarwal31f28562015-12-11 10:35:10 +05302863 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002864
Siddarth Poddarb2011f62016-04-27 20:45:42 +05302865 ol_txrx_peer_tx_queue_free(pdev, peer);
2866
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002867 /*
 2868		 * 'array' is allocated in the addba handler and is supposed
 2869		 * to be freed in the delba handler. There are cases (for
 2870		 * example, SSR) where the delba handler is not called. Because
 2871		 * 'array' points to the address of 'base' by default and is
 2872		 * only reallocated later by the addba handler, free the memory
 2873		 * only when 'array' does not point to 'base'.
2874 */
2875 for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
2876 if (peer->tids_rx_reorder[i].array !=
2877 &peer->tids_rx_reorder[i].base) {
2878 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
2879 "%s, delete reorder arr, tid:%d\n",
2880 __func__, i);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302881 qdf_mem_free(peer->tids_rx_reorder[i].array);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002882 ol_rx_reorder_init(&peer->tids_rx_reorder[i],
2883 (uint8_t) i);
2884 }
2885 }
2886
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302887 qdf_mem_free(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002888 } else {
Mohit Khanna47384bc2016-08-15 15:37:05 -07002889 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2890 "%s: peer %p peer->ref_cnt = %d", __func__, peer,
2891 qdf_atomic_read(&peer->ref_cnt));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302892 qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002893 }
2894}
2895
Dhanashri Atre12a08392016-02-17 13:10:34 -08002896/**
Mohit Khanna0696eef2016-04-14 16:14:08 -07002897 * ol_txrx_clear_peer_internal() - ol internal function to clear peer
2898 * @peer: pointer to ol txrx peer structure
2899 *
2900 * Return: QDF Status
2901 */
2902static QDF_STATUS
2903ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
2904{
2905 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
2906 /* Drop pending Rx frames in CDS */
2907 if (sched_ctx)
2908 cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
2909
2910 /* Purge the cached rx frame queue */
2911 ol_txrx_flush_rx_frames(peer, 1);
2912
2913 qdf_spin_lock_bh(&peer->peer_info_lock);
Mohit Khanna0696eef2016-04-14 16:14:08 -07002914 peer->state = OL_TXRX_PEER_STATE_DISC;
2915 qdf_spin_unlock_bh(&peer->peer_info_lock);
2916
2917 return QDF_STATUS_SUCCESS;
2918}
2919
2920/**
2921 * ol_txrx_clear_peer() - clear peer
2922 * @sta_id: sta id
2923 *
2924 * Return: QDF Status
2925 */
2926QDF_STATUS ol_txrx_clear_peer(uint8_t sta_id)
2927{
2928 struct ol_txrx_peer_t *peer;
2929 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
2930
2931 if (!pdev) {
2932 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find pdev!",
2933 __func__);
2934 return QDF_STATUS_E_FAILURE;
2935 }
2936
2937 if (sta_id >= WLAN_MAX_STA_COUNT) {
2938 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid sta id %d", sta_id);
2939 return QDF_STATUS_E_INVAL;
2940 }
2941
2943 peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
2944 if (!peer)
2945 return QDF_STATUS_E_FAULT;
2946
2947 return ol_txrx_clear_peer_internal(peer);
2948
2949}
2950
2951/**
Dhanashri Atre12a08392016-02-17 13:10:34 -08002952 * ol_txrx_peer_detach - Delete a peer's data object.
 2953 * @peer - the peer data object to delete
2954 *
2955 * When the host's control SW disassociates a peer, it calls
2956 * this function to delete the peer's data object. The reference
2957 * stored in the control peer object to the data peer
2958 * object (set up by a call to ol_peer_store()) is provided.
2959 *
2960 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002961void ol_txrx_peer_detach(ol_txrx_peer_handle peer)
2962{
2963 struct ol_txrx_vdev_t *vdev = peer->vdev;
2964
2965 /* redirect peer's rx delivery function to point to a discard func */
2966 peer->rx_opt_proc = ol_rx_discard;
2967
2968 peer->valid = 0;
2969
Mohit Khanna0696eef2016-04-14 16:14:08 -07002970 /* flush all rx packets before clearing up the peer local_id */
2971 ol_txrx_clear_peer_internal(peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002972 ol_txrx_local_peer_id_free(peer->vdev->pdev, peer);
2973
2974 /* debug print to dump rx reorder state */
2975 /* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
2976
2977 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Houston Hoffman43d47fa2016-02-24 16:34:30 -08002978 "%s:peer %p (%02x:%02x:%02x:%02x:%02x:%02x)",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002979 __func__, peer,
2980 peer->mac_addr.raw[0], peer->mac_addr.raw[1],
2981 peer->mac_addr.raw[2], peer->mac_addr.raw[3],
2982 peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002983
2984 if (peer->vdev->last_real_peer == peer)
2985 peer->vdev->last_real_peer = NULL;
2986
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302987 qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002988 if (vdev->last_real_peer == peer)
2989 vdev->last_real_peer = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302990 qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002991 htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
2992
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302993 qdf_spinlock_destroy(&peer->peer_info_lock);
2994 qdf_spinlock_destroy(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002995 /* set delete_in_progress to identify that wma
 2996	 * is waiting for the unmap message for this peer */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05302997 qdf_atomic_set(&peer->delete_in_progress, 1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002998 /*
2999 * Remove the reference added during peer_attach.
3000 * The peer will still be left allocated until the
3001 * PEER_UNMAP message arrives to remove the other
3002 * reference, added by the PEER_MAP message.
3003 */
3004 ol_txrx_peer_unref_delete(peer);
3005}
3006
3007ol_txrx_peer_handle
3008ol_txrx_peer_find_by_addr(struct ol_txrx_pdev_t *pdev, uint8_t *peer_mac_addr)
3009{
3010 struct ol_txrx_peer_t *peer;
3011 peer = ol_txrx_peer_find_hash_find(pdev, peer_mac_addr, 0, 0);
3012 if (peer) {
3013 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Houston Hoffman43d47fa2016-02-24 16:34:30 -08003014 "%s: Delete extra reference %p", __func__, peer);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003015 /* release the extra reference */
3016 ol_txrx_peer_unref_delete(peer);
3017 }
3018 return peer;
3019}
3020
3021/**
3022 * ol_txrx_dump_tx_desc() - dump tx desc total and free count
 3023 * @pdev_handle: Pointer to the txrx pdev
3024 *
3025 * Return: none
3026 */
3027static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3028{
3029 struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
3030 uint32_t total;
3031
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303032 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3033 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3034 else
3035 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003036
3037 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303038 "total tx credit %d num_free %d",
3039 total, pdev->tx_desc.num_free);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003040
3041 return;
3042}
3043
3044/**
3045 * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3046 * @timeout: timeout in ms
3047 *
3048 * Wait for tx queue to be empty, return timeout error if
3049 * queue doesn't empty before timeout occurs.
3050 *
3051 * Return:
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303052 * QDF_STATUS_SUCCESS if the queue empties,
3053 * QDF_STATUS_E_TIMEOUT in case of timeout,
3054 * QDF_STATUS_E_FAULT in case of missing handle
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003055 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303056QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003057{
Anurag Chouhan6d760662016-02-20 16:05:43 +05303058 ol_txrx_pdev_handle txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003059
3060 if (txrx_pdev == NULL) {
3061 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
3062 "%s: txrx context is null", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303063 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003064 }
3065
3066 while (ol_txrx_get_tx_pending(txrx_pdev)) {
Anurag Chouhan512c7d52016-02-19 15:49:46 +05303067 qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003068 if (timeout <= 0) {
3069 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303070 "%s: tx frames are pending", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003071 ol_txrx_dump_tx_desc(txrx_pdev);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303072 return QDF_STATUS_E_TIMEOUT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003073 }
3074 timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3075 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303076 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003077}
3078
3079#ifndef QCA_WIFI_3_0_EMU
3080#define SUSPEND_DRAIN_WAIT 500
3081#else
3082#define SUSPEND_DRAIN_WAIT 3000
3083#endif
3084
Yue Ma1e11d792016-02-26 18:58:44 -08003085#ifdef FEATURE_RUNTIME_PM
3086/**
3087 * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
3088 * @txrx_pdev: TXRX pdev context
3089 *
3090 * TXRX is ready to runtime suspend if there are no pending packets
3091 * in the tx queue.
3092 *
3093 * Return: QDF_STATUS
3094 */
3095QDF_STATUS ol_txrx_runtime_suspend(ol_txrx_pdev_handle txrx_pdev)
3096{
3097 if (ol_txrx_get_tx_pending(txrx_pdev))
3098 return QDF_STATUS_E_BUSY;
3099 else
3100 return QDF_STATUS_SUCCESS;
3101}
3102
3103/**
3104 * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
3105 * @txrx_pdev: TXRX pdev context
3106 *
3107 * This is a dummy function for symmetry.
3108 *
3109 * Return: QDF_STATUS_SUCCESS
3110 */
3111QDF_STATUS ol_txrx_runtime_resume(ol_txrx_pdev_handle txrx_pdev)
3112{
3113 return QDF_STATUS_SUCCESS;
3114}
3115#endif
3116
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003117/**
3118 * ol_txrx_bus_suspend() - bus suspend
3119 *
3120 * Ensure that ol_txrx is ready for bus suspend
3121 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303122 * Return: QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003123 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303124QDF_STATUS ol_txrx_bus_suspend(void)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003125{
3126 return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3127}
3128
3129/**
3130 * ol_txrx_bus_resume() - bus resume
3131 *
 3132 * Dummy function for symmetry
3133 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303134 * Return: QDF_STATUS_SUCCESS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003135 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303136QDF_STATUS ol_txrx_bus_resume(void)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003137{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303138 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003139}
3140
Dhanashri Atreb08959a2016-03-01 17:28:03 -08003141/**
3142 * ol_txrx_get_tx_pending - Get the number of pending transmit
3143 * frames that are awaiting completion.
3144 *
 3145 * @pdev_handle - the data physical device object
 3146 * Mainly used in the cleanup path to make sure all buffers have been freed
3147 *
3148 * Return: count of pending frames
3149 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003150int ol_txrx_get_tx_pending(ol_txrx_pdev_handle pdev_handle)
3151{
3152 struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
3153 uint32_t total;
3154
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303155 if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3156 total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3157 else
3158 total = ol_tx_get_desc_global_pool_size(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003159
Nirav Shah55b45a02016-01-21 10:00:16 +05303160 return total - ol_tx_get_total_free_desc(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003161}
3162
3163void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
3164{
3165 ol_tx_desc_list tx_descs;
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303166 /* First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
3167 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003168	 * which is the same as the normal data send completion path */
3169 htt_tx_pending_discard(pdev_handle->htt_pdev);
3170
3171 TAILQ_INIT(&tx_descs);
3172 ol_tx_queue_discard(pdev_handle, true, &tx_descs);
3173 /* Discard Frames in Discard List */
3174 ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
3175
3176 ol_tx_discard_target_frms(pdev_handle);
3177}
3178
3179/*--- debug features --------------------------------------------------------*/
3180
3181unsigned g_txrx_print_level = TXRX_PRINT_LEVEL_ERR; /* default */
3182
3183void ol_txrx_print_level_set(unsigned level)
3184{
3185#ifndef TXRX_PRINT_ENABLE
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303186 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003187 "The driver is compiled without TXRX prints enabled.\n"
3188 "To enable them, recompile with TXRX_PRINT_ENABLE defined");
3189#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303190 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003191 "TXRX printout level changed from %d to %d",
3192 g_txrx_print_level, level);
3193 g_txrx_print_level = level;
3194#endif
3195}
3196
3197struct ol_txrx_stats_req_internal {
3198 struct ol_txrx_stats_req base;
3199 int serviced; /* state of this request */
3200 int offset;
3201};
3202
3203static inline
3204uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
3205{
3206 return (uint64_t) ((size_t) req);
3207}
3208
3209static inline
3210struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
3211{
3212 return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
3213}
3214
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003215void
3216ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
3217 uint8_t cfg_stats_type, uint32_t cfg_val)
3218{
3219 uint64_t dummy_cookie = 0;
3220 htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
3221 0 /* reset mask */,
3222 cfg_stats_type, cfg_val, dummy_cookie);
3223}
3224
3225A_STATUS
Nirav Shahd2310422016-01-21 18:58:06 +05303226ol_txrx_fw_stats_get(ol_txrx_vdev_handle vdev, struct ol_txrx_stats_req *req,
3227 bool response_expected)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003228{
3229 struct ol_txrx_pdev_t *pdev = vdev->pdev;
3230 uint64_t cookie;
3231 struct ol_txrx_stats_req_internal *non_volatile_req;
3232
3233 if (!pdev ||
3234 req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
3235 req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
3236 return A_ERROR;
3237 }
3238
3239 /*
3240 * Allocate a non-transient stats request object.
3241 * (The one provided as an argument is likely allocated on the stack.)
3242 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303243 non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003244 if (!non_volatile_req)
3245 return A_NO_MEMORY;
3246
3247 /* copy the caller's specifications */
3248 non_volatile_req->base = *req;
3249 non_volatile_req->serviced = 0;
3250 non_volatile_req->offset = 0;
3251
3252 /* use the non-volatile request object's address as the cookie */
3253 cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
3254
3255 if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
3256 req->stats_type_upload_mask,
3257 req->stats_type_reset_mask,
3258 HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
3259 cookie)) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303260 qdf_mem_free(non_volatile_req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003261 return A_ERROR;
3262 }
3263
3264 if (req->wait.blocking)
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303265 while (qdf_semaphore_acquire(req->wait.sem_ptr))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003266 ;
3267
Nirav Shahd2310422016-01-21 18:58:06 +05303268 if (response_expected == false)
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303269 qdf_mem_free(non_volatile_req);
Nirav Shahd2310422016-01-21 18:58:06 +05303270
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003271 return A_OK;
3272}
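
/*
 * Illustrative usage sketch (not part of the driver): a caller issuing a
 * blocking firmware stats request only needs a stack request object, since
 * ol_txrx_fw_stats_get() copies it into a non-transient object.  The
 * qdf_semaphore_t type, the helper name and the field choices below are
 * assumptions inferred from this file, not an established API contract.
 */
#if 0
static A_STATUS example_blocking_fw_stats(ol_txrx_vdev_handle vdev,
					  uint32_t upload_mask,
					  qdf_semaphore_t *sem)
{
	struct ol_txrx_stats_req req = { 0 };

	req.stats_type_upload_mask = upload_mask;
	req.print.verbose = 1;   /* parse and print the response */
	req.wait.blocking = 1;   /* ol_txrx_fw_stats_get() waits on sem */
	req.wait.sem_ptr = sem;  /* released by ol_txrx_fw_stats_handler() */

	return ol_txrx_fw_stats_get(vdev, &req, true);
}
#endif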
Dhanashri Atre12a08392016-02-17 13:10:34 -08003273
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003274void
3275ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
3276 uint64_t cookie, uint8_t *stats_info_list)
3277{
3278 enum htt_dbg_stats_type type;
3279 enum htt_dbg_stats_status status;
3280 int length;
3281 uint8_t *stats_data;
3282 struct ol_txrx_stats_req_internal *req;
3283 int more = 0;
3284
3285 req = ol_txrx_u64_to_stats_ptr(cookie);
3286
3287 do {
3288 htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
3289 &length, &stats_data);
3290 if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
3291 break;
3292 if (status == HTT_DBG_STATS_STATUS_PRESENT ||
3293 status == HTT_DBG_STATS_STATUS_PARTIAL) {
3294 uint8_t *buf;
3295 int bytes = 0;
3296
3297 if (status == HTT_DBG_STATS_STATUS_PARTIAL)
3298 more = 1;
3299 if (req->base.print.verbose || req->base.print.concise)
3300 /* provide the header along with the data */
3301 htt_t2h_stats_print(stats_info_list,
3302 req->base.print.concise);
3303
3304 switch (type) {
3305 case HTT_DBG_STATS_WAL_PDEV_TXRX:
3306 bytes = sizeof(struct wlan_dbg_stats);
3307 if (req->base.copy.buf) {
3308 int lmt;
3309
3310 lmt = sizeof(struct wlan_dbg_stats);
3311 if (req->base.copy.byte_limit < lmt)
3312 lmt = req->base.copy.byte_limit;
3313 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303314 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003315 }
3316 break;
3317 case HTT_DBG_STATS_RX_REORDER:
3318 bytes = sizeof(struct rx_reorder_stats);
3319 if (req->base.copy.buf) {
3320 int lmt;
3321
3322 lmt = sizeof(struct rx_reorder_stats);
3323 if (req->base.copy.byte_limit < lmt)
3324 lmt = req->base.copy.byte_limit;
3325 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303326 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003327 }
3328 break;
3329 case HTT_DBG_STATS_RX_RATE_INFO:
3330 bytes = sizeof(wlan_dbg_rx_rate_info_t);
3331 if (req->base.copy.buf) {
3332 int lmt;
3333
3334 lmt = sizeof(wlan_dbg_rx_rate_info_t);
3335 if (req->base.copy.byte_limit < lmt)
3336 lmt = req->base.copy.byte_limit;
3337 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303338 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003339 }
3340 break;
3341
3342 case HTT_DBG_STATS_TX_RATE_INFO:
3343 bytes = sizeof(wlan_dbg_tx_rate_info_t);
3344 if (req->base.copy.buf) {
3345 int lmt;
3346
3347 lmt = sizeof(wlan_dbg_tx_rate_info_t);
3348 if (req->base.copy.byte_limit < lmt)
3349 lmt = req->base.copy.byte_limit;
3350 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303351 qdf_mem_copy(buf, stats_data, lmt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003352 }
3353 break;
3354
3355 case HTT_DBG_STATS_TX_PPDU_LOG:
3356 bytes = 0;
3357 /* TO DO: specify how many bytes are present */
3358 /* TO DO: add copying to the requestor's buf */
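			/* note: there is no break here, so execution falls
			 * through to the RX_REMOTE_RING_BUFFER_INFO case
			 * below, which overwrites 'bytes' */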
3359
3360 case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
3361 bytes = sizeof(struct rx_remote_buffer_mgmt_stats);
3362 if (req->base.copy.buf) {
3363 int limit;
3364
3365 limit = sizeof(struct rx_remote_buffer_mgmt_stats);
3366 if (req->base.copy.byte_limit < limit) {
3367 limit = req->base.copy.byte_limit;
3368 }
3369 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303370 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003371 }
3372 break;
3373
3374 case HTT_DBG_STATS_TXBF_INFO:
3375 bytes = sizeof(struct wlan_dbg_txbf_data_stats);
3376 if (req->base.copy.buf) {
3377 int limit;
3378
3379 limit = sizeof(struct wlan_dbg_txbf_data_stats);
3380 if (req->base.copy.byte_limit < limit)
3381 limit = req->base.copy.byte_limit;
3382 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303383 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003384 }
3385 break;
3386
3387 case HTT_DBG_STATS_SND_INFO:
3388 bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
3389 if (req->base.copy.buf) {
3390 int limit;
3391
3392 limit = sizeof(struct wlan_dbg_txbf_snd_stats);
3393 if (req->base.copy.byte_limit < limit)
3394 limit = req->base.copy.byte_limit;
3395 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303396 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003397 }
3398 break;
3399
3400 case HTT_DBG_STATS_TX_SELFGEN_INFO:
3401 bytes = sizeof(struct wlan_dbg_tx_selfgen_stats);
3402 if (req->base.copy.buf) {
3403 int limit;
3404
3405 limit = sizeof(struct wlan_dbg_tx_selfgen_stats);
3406 if (req->base.copy.byte_limit < limit)
3407 limit = req->base.copy.byte_limit;
3408 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303409 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003410 }
3411 break;
3412
3413 case HTT_DBG_STATS_ERROR_INFO:
3414 bytes =
3415 sizeof(struct wlan_dbg_wifi2_error_stats);
3416 if (req->base.copy.buf) {
3417 int limit;
3418
3419 limit =
3420 sizeof(struct wlan_dbg_wifi2_error_stats);
3421 if (req->base.copy.byte_limit < limit)
3422 limit = req->base.copy.byte_limit;
3423 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303424 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003425 }
3426 break;
3427
3428 case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
3429 bytes =
3430 sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
3431 if (req->base.copy.buf) {
3432 int limit;
3433
3434 limit = sizeof(struct
3435 rx_txbf_musu_ndpa_pkts_stats);
3436 if (req->base.copy.byte_limit < limit)
3437 limit =
3438 req->base.copy.byte_limit;
3439 buf = req->base.copy.buf + req->offset;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303440 qdf_mem_copy(buf, stats_data, limit);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003441 }
3442 break;
3443
3444 default:
3445 break;
3446 }
3447 buf = req->base.copy.buf
3448 ? req->base.copy.buf
3449 : stats_data;
3450 if (req->base.callback.fp)
3451 req->base.callback.fp(req->base.callback.ctxt,
3452 type, buf, bytes);
3453 }
3454 stats_info_list += length;
3455 } while (1);
3456
3457 if (!more) {
3458 if (req->base.wait.blocking)
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303459 qdf_semaphore_release(req->base.wait.sem_ptr);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303460 qdf_mem_free(req);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003461 }
3462}
3463
3464#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
3465int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
3466{
3467 if (debug_specs & TXRX_DBG_MASK_OBJS) {
3468#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
3469 ol_txrx_pdev_display(vdev->pdev, 0);
3470#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303471 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303472 "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003473#endif
3474 }
3475 if (debug_specs & TXRX_DBG_MASK_STATS) {
3476 ol_txrx_stats_display(vdev->pdev);
3477 }
3478 if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
3479#if defined(ENABLE_TXRX_PROT_ANALYZE)
3480 ol_txrx_prot_ans_display(vdev->pdev);
3481#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303482 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303483 "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003484#endif
3485 }
3486 if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
3487#if defined(ENABLE_RX_REORDER_TRACE)
3488 ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
3489#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303490 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303491 "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003492#endif
3493
3494 }
3495 return 0;
3496}
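
/*
 * Illustrative example (not part of the driver): the TXRX_DBG_MASK_* bits
 * can be OR'd together to request several dumps in a single call, e.g.
 *
 *	ol_txrx_debug(vdev, TXRX_DBG_MASK_OBJS | TXRX_DBG_MASK_STATS);
 */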
3497#endif
3498
3499int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
3500 int max_subfrms_ampdu, int max_subfrms_amsdu)
3501{
3502 return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
3503 max_subfrms_ampdu, max_subfrms_amsdu);
3504}
3505
3506#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
3507void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
3508{
3509 struct ol_txrx_vdev_t *vdev;
3510
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303511 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003512 "%*s%s:\n", indent, " ", "txrx pdev");
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303513 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003514 "%*spdev object: %p", indent + 4, " ", pdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303515 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003516 "%*svdev list:", indent + 4, " ");
3517 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303518 ol_txrx_vdev_display(vdev, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003519 }
3520 ol_txrx_peer_find_display(pdev, indent + 4);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303521 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003522 "%*stx desc pool: %d elems @ %p", indent + 4, " ",
3523 pdev->tx_desc.pool_size, pdev->tx_desc.array);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303524 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003525 htt_display(pdev->htt_pdev, indent);
3526}
3527
3528void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
3529{
3530 struct ol_txrx_peer_t *peer;
3531
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303532 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003533 "%*stxrx vdev: %p\n", indent, " ", vdev);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303534 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003535 "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303536 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003537 "%*sMAC addr: %d:%d:%d:%d:%d:%d",
3538 indent + 4, " ",
3539 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
3540 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
3541 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303542 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003543 "%*speer list:", indent + 4, " ");
3544 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303545 ol_txrx_peer_display(peer, indent + 8);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003546 }
3547}
3548
3549void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
3550{
3551 int i;
3552
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303553 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003554 "%*stxrx peer: %p", indent, " ", peer);
3555 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
3556 if (peer->peer_ids[i] != HTT_INVALID_PEER) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303557 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003558 "%*sID: %d", indent + 4, " ",
3559 peer->peer_ids[i]);
3560 }
3561 }
3562}
3563#endif /* TXRX_DEBUG_LEVEL */
3564
3565#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
3566void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
3567{
3568 int msdu_idx;
3569 int seg_idx;
3570
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303571 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303572 "TSO Statistics:");
3573 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003574 "TSO pkts %lld, bytes %lld\n",
3575 pdev->stats.pub.tx.tso.tso_pkts.pkts,
3576 pdev->stats.pub.tx.tso.tso_pkts.bytes);
3577
Nirav Shahda008342016-05-17 18:50:40 +05303578 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3579 "TSO Histogram for numbers of segments:\n"
3580 "Single segment %d\n"
3581 " 2-5 segments %d\n"
3582 " 6-10 segments %d\n"
3583 "11-15 segments %d\n"
3584 "16-20 segments %d\n"
3585 " 20+ segments %d\n",
3586 pdev->stats.pub.tx.tso.tso_hist.pkts_1,
3587 pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
3588 pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
3589 pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
3590 pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
3591 pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);
3592
3593 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3594 "TSO History Buffer: Total size %d, current_index %d",
3595 NUM_MAX_TSO_MSDUS,
3596 TXRX_STATS_TSO_MSDU_IDX(pdev));
3597
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003598 for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303599 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303600 "jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
3601 msdu_idx,
3602 TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
3603 TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
3604 TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
3605 TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));
3606
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003607 for (seg_idx = 0;
3608 ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx)) &&
3609 (seg_idx < NUM_MAX_TSO_SEGS));
3610 seg_idx++) {
Anurag Chouhan6d760662016-02-20 16:05:43 +05303611 struct qdf_tso_seg_t tso_seg =
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003612 TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);
3613
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303614 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303615 "seg idx: %d", seg_idx);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303616 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303617 "tso_enable: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003618 tso_seg.tso_flags.tso_enable);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303619 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303620 "fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003621 tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
3622 tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
3623 tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
3624 tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
3625 tso_seg.tso_flags.ns);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303626 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303627 "tcp_seq_num: 0x%x ip_id: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003628 tso_seg.tso_flags.tcp_seq_num,
3629 tso_seg.tso_flags.ip_id);
3630 }
Nirav Shahda008342016-05-17 18:50:40 +05303631 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, "\n");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003632 }
3633}
Dhanashri Atre83d373d2015-07-28 16:45:59 -07003634#else
3635void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
3636{
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303637 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Dhanashri Atre83d373d2015-07-28 16:45:59 -07003638 "TSO is not supported\n");
3639}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003640#endif
3641
3642/**
3643 * ol_txrx_stats() - update ol layer stats
3644 * @vdev_id: vdev_id
3645 * @buffer: pointer to buffer
3646 * @buf_len: length of the buffer
3647 *
3648 * Return: length of string
3649 */
3650int
3651ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned buf_len)
3652{
3653 uint32_t len = 0;
3654
3655 ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
3656 if (!vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303657 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303658 "%s: vdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003659 snprintf(buffer, buf_len, "vdev not found");
3660 return len;
3661 }
3662
3663 len = scnprintf(buffer, buf_len,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303664 "\nTXRX stats:\n\nllQueue State : %s\n pause %u unpause %u\n overflow %u\n llQueue timer state : %s\n",
3665 ((vdev->ll_pause.is_q_paused == false) ?
3666 "UNPAUSED" : "PAUSED"),
3667 vdev->ll_pause.q_pause_cnt,
3668 vdev->ll_pause.q_unpause_cnt,
3669 vdev->ll_pause.q_overflow_cnt,
3670 ((vdev->ll_pause.is_q_timer_on == false)
3671 ? "NOT-RUNNING" : "RUNNING"));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003672 return len;
3673}
3674
3675void ol_txrx_stats_display(ol_txrx_pdev_handle pdev)
3676{
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303677 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shah6a4eee62016-04-25 10:15:04 +05303678 "TX PATH Statistics:");
3679 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303680 "sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
3681 pdev->stats.pub.tx.from_stack.pkts,
3682 pdev->stats.pub.tx.from_stack.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003683 pdev->stats.pub.tx.dropped.host_reject.pkts,
3684 pdev->stats.pub.tx.dropped.host_reject.bytes,
3685 pdev->stats.pub.tx.dropped.download_fail.pkts
3686 + pdev->stats.pub.tx.dropped.target_discard.pkts
3687 + pdev->stats.pub.tx.dropped.no_ack.pkts,
3688 pdev->stats.pub.tx.dropped.download_fail.bytes
3689 + pdev->stats.pub.tx.dropped.target_discard.bytes
3690 + pdev->stats.pub.tx.dropped.no_ack.bytes);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303691 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303692 "successfully delivered: %lld (%lld B), "
3693 "download fail: %lld (%lld B), "
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003694 "target discard: %lld (%lld B), "
3695 "no ack: %lld (%lld B)",
Nirav Shahda008342016-05-17 18:50:40 +05303696 pdev->stats.pub.tx.delivered.pkts,
3697 pdev->stats.pub.tx.delivered.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003698 pdev->stats.pub.tx.dropped.download_fail.pkts,
3699 pdev->stats.pub.tx.dropped.download_fail.bytes,
3700 pdev->stats.pub.tx.dropped.target_discard.pkts,
3701 pdev->stats.pub.tx.dropped.target_discard.bytes,
3702 pdev->stats.pub.tx.dropped.no_ack.pkts,
3703 pdev->stats.pub.tx.dropped.no_ack.bytes);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303704 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303705 "Tx completions per HTT message:\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003706 "Single Packet %d\n"
3707 " 2-10 Packets %d\n"
3708 "11-20 Packets %d\n"
3709 "21-30 Packets %d\n"
3710 "31-40 Packets %d\n"
3711 "41-50 Packets %d\n"
3712 "51-60 Packets %d\n"
3713 " 60+ Packets %d\n",
3714 pdev->stats.pub.tx.comp_histogram.pkts_1,
3715 pdev->stats.pub.tx.comp_histogram.pkts_2_10,
3716 pdev->stats.pub.tx.comp_histogram.pkts_11_20,
3717 pdev->stats.pub.tx.comp_histogram.pkts_21_30,
3718 pdev->stats.pub.tx.comp_histogram.pkts_31_40,
3719 pdev->stats.pub.tx.comp_histogram.pkts_41_50,
3720 pdev->stats.pub.tx.comp_histogram.pkts_51_60,
3721 pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
Nirav Shahda008342016-05-17 18:50:40 +05303722
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303723 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shah6a4eee62016-04-25 10:15:04 +05303724 "RX PATH Statistics:");
3725 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3726 "%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
Nirav Shahda008342016-05-17 18:50:40 +05303727 "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
3728 "msdus with frag_ind: %d msdus with offload_ind: %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003729 pdev->stats.priv.rx.normal.ppdus,
3730 pdev->stats.priv.rx.normal.mpdus,
3731 pdev->stats.pub.rx.delivered.pkts,
3732 pdev->stats.pub.rx.delivered.bytes,
Nirav Shah6a4eee62016-04-25 10:15:04 +05303733 pdev->stats.pub.rx.dropped_err.pkts,
3734 pdev->stats.pub.rx.dropped_err.bytes,
3735 pdev->stats.pub.rx.dropped_peer_invalid.pkts,
3736 pdev->stats.pub.rx.dropped_peer_invalid.bytes,
3737 pdev->stats.pub.rx.dropped_mic_err.pkts,
Nirav Shahda008342016-05-17 18:50:40 +05303738 pdev->stats.pub.rx.dropped_mic_err.bytes,
3739 pdev->stats.pub.rx.msdus_with_frag_ind,
3740 pdev->stats.pub.rx.msdus_with_offload_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003741
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303742 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003743 " fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
3744 pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
3745 pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
3746 pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
Nirav Shah6a4eee62016-04-25 10:15:04 +05303747
3748 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Nirav Shahda008342016-05-17 18:50:40 +05303749 "Rx packets per HTT message:\n"
Nirav Shah6a4eee62016-04-25 10:15:04 +05303750 "Single Packet %d\n"
3751 " 2-10 Packets %d\n"
3752 "11-20 Packets %d\n"
3753 "21-30 Packets %d\n"
3754 "31-40 Packets %d\n"
3755 "41-50 Packets %d\n"
3756 "51-60 Packets %d\n"
3757 " 60+ Packets %d\n",
3758 pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
3759 pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
3760 pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
3761 pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
3762 pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
3763 pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
3764 pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
3765 pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003766}
3767
3768void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
3769{
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303770 qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003771}
3772
3773#if defined(ENABLE_TXRX_PROT_ANALYZE)
3774
3775void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
3776{
3777 ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
3778 ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
3779}
3780
3781#endif /* ENABLE_TXRX_PROT_ANALYZE */
3782
3783#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
3784int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
3785{
3786 return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
3787 OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
3788}
3789#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
3790
3791#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
3792A_STATUS
3793ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
3794 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
3795{
Anurag Chouhanc5548422016-02-24 18:33:27 +05303796 qdf_assert(pdev && peer && stats);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303797 qdf_spin_lock_bh(&pdev->peer_stat_mutex);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303798 qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303799 qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003800 return A_OK;
3801}
3802#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
3803
3804void ol_vdev_rx_set_intrabss_fwd(ol_txrx_vdev_handle vdev, bool val)
3805{
3806 if (NULL == vdev)
3807 return;
3808
3809 vdev->disable_intrabss_fwd = val;
3810}
3811
Nirav Shahc657ef52016-07-26 14:22:38 +05303812/**
3813 * ol_txrx_update_mac_id() - update mac_id for vdev
3814 * @vdev_id: vdev id
3815 * @mac_id: mac id
3816 *
3817 * Return: none
3818 */
3819void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id)
3820{
3821 ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
3822
3823 if (NULL == vdev) {
3824 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3825 "%s: Invalid vdev_id %d", __func__, vdev_id);
3826 return;
3827 }
3828 vdev->mac_id = mac_id;
3829}
3830
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003831#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
3832
3833/**
3834 * ol_txrx_get_vdev_from_sta_id() - get vdev from sta_id
3835 * @sta_id: sta_id
3836 *
3837 * Return: vdev handle
3838 * NULL if not found.
3839 */
3840static ol_txrx_vdev_handle ol_txrx_get_vdev_from_sta_id(uint8_t sta_id)
3841{
3842 struct ol_txrx_peer_t *peer = NULL;
3843 ol_txrx_pdev_handle pdev = NULL;
3844
3845 if (sta_id >= WLAN_MAX_STA_COUNT) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303846 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303847 "Invalid sta id passed");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003848 return NULL;
3849 }
3850
Anurag Chouhan6d760662016-02-20 16:05:43 +05303851 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003852 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303853 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303854 "PDEV not found for sta_id [%d]", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003855 return NULL;
3856 }
3857
3858 peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
3859
3860 if (!peer) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303861 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303862 "PEER [%d] not found", sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003863 return NULL;
3864 }
3865
3866 return peer->vdev;
3867}
3868
3869/**
3870 * ol_txrx_register_tx_flow_control() - register tx flow control callback
3871 * @vdev_id: vdev_id
3872 * @flowControl: flow control callback
3873 * @osif_fc_ctx: callback context
3874 *
 3875 * Return: 0 for success or error code
3876 */
3877int ol_txrx_register_tx_flow_control (uint8_t vdev_id,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303878 ol_txrx_tx_flow_control_fp flowControl,
3879 void *osif_fc_ctx)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003880{
3881 ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
3882 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303883 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303884 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003885 return -EINVAL;
3886 }
3887
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303888 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003889 vdev->osif_flow_control_cb = flowControl;
3890 vdev->osif_fc_ctx = osif_fc_ctx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303891 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003892 return 0;
3893}
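
/*
 * Illustrative usage sketch (not part of the driver): an OS interface
 * layer could pause/resume its transmit queues from this callback.  The
 * (void *ctx, bool tx_resume) signature is inferred from
 * ol_txrx_flow_control_cb() below; the handler name and the use of a
 * struct net_device context are assumptions for the example only.
 */
#if 0
static void example_osif_flow_control(void *osif_fc_ctx, bool tx_resume)
{
	struct net_device *dev = (struct net_device *)osif_fc_ctx;

	if (tx_resume)
		netif_tx_wake_all_queues(dev);
	else
		netif_tx_stop_all_queues(dev);
}

/* registered once per vdev, e.g. during adapter init:
 *	ol_txrx_register_tx_flow_control(vdev_id,
 *					 example_osif_flow_control, dev);
 */
#endif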
3894
3895/**
 3896 * ol_txrx_deregister_tx_flow_control_cb() - deregister tx flow control callback
3897 * @vdev_id: vdev_id
3898 *
3899 * Return: 0 for success or error code
3900 */
3901int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
3902{
3903 ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
3904 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303905 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303906 "%s: Invalid vdev_id", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003907 return -EINVAL;
3908 }
3909
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303910 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003911 vdev->osif_flow_control_cb = NULL;
3912 vdev->osif_fc_ctx = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303913 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003914 return 0;
3915}
3916
3917/**
 3918 * ol_txrx_get_tx_resource() - check whether tx resources are above the low watermark
3919 * @sta_id: sta id
3920 * @low_watermark: low watermark
3921 * @high_watermark_offset: high watermark offset value
3922 *
 3923 * Return: true if enough tx descriptors are free (or sta_id is invalid), false otherwise
3924 */
3925bool
3926ol_txrx_get_tx_resource(uint8_t sta_id,
3927 unsigned int low_watermark,
3928 unsigned int high_watermark_offset)
3929{
3930 ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
3931 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303932 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303933 "%s: Invalid sta_id %d", __func__, sta_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003934		/* Return true so that the caller does not treat this as a
 3935		 * shortage of tx resources (i.e. below low_watermark).
 3936		 * sta_id validation will be done in ol_tx_send_data_frame,
 3937		 * and if the sta_id is not registered then the host will
 3938		 * drop the packet.
 3939		 */
3940 return true;
3941 }
3942
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303943 qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303944
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003945 if (vdev->pdev->tx_desc.num_free < (uint16_t) low_watermark) {
3946 vdev->tx_fl_lwm = (uint16_t) low_watermark;
3947 vdev->tx_fl_hwm =
3948 (uint16_t) (low_watermark + high_watermark_offset);
3949 /* Not enough free resource, stop TX OS Q */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05303950 qdf_atomic_set(&vdev->os_q_paused, 1);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303951 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003952 return false;
3953 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303954 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003955 return true;
3956}
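
/*
 * Illustrative usage sketch (not part of the driver): a data-path caller
 * can poll the watermarks before handing a frame down.  The helper name
 * and the watermark values (100 free descriptors low, +50 offset) are
 * assumptions for the example only.
 */
#if 0
static int example_check_tx_room(uint8_t sta_id)
{
	if (!ol_txrx_get_tx_resource(sta_id, 100, 50)) {
		/* below the low watermark: the OS tx queue has been paused,
		 * so back off instead of queueing more frames */
		return -EBUSY;
	}
	return 0;
}
#endif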
3957
3958/**
3959 * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
3960 * @vdev_id: vdev id
3961 * @pause_q_depth: pause queue depth
3962 *
3963 * Return: 0 for success or error code
3964 */
3965int
3966ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
3967{
3968 ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
3969 if (NULL == vdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05303970 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303971 "%s: Invalid vdev_id %d", __func__, vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003972 return -EINVAL;
3973 }
3974
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303975 qdf_spin_lock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003976 vdev->ll_pause.max_q_depth = pause_q_depth;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303977 qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003978
3979 return 0;
3980}
3981
3982/**
3983 * ol_txrx_flow_control_cb() - call osif flow control callback
3984 * @vdev: vdev handle
3985 * @tx_resume: tx resume flag
3986 *
3987 * Return: none
3988 */
3989inline void ol_txrx_flow_control_cb(ol_txrx_vdev_handle vdev,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303990 bool tx_resume)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003991{
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303992 qdf_spin_lock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003993 if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
3994 vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05303995 qdf_spin_unlock_bh(&vdev->flow_control_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003996
3997 return;
3998}
3999#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
4000
4001#ifdef IPA_OFFLOAD
Leo Chang8e073612015-11-13 10:55:34 -08004002/**
4003 * ol_txrx_ipa_uc_get_resource() - Client request resource information
4004 * @pdev: handle to the HTT instance
4005 * @ce_sr_base_paddr: copy engine source ring base physical address
4006 * @ce_sr_ring_size: copy engine source ring size
4007 * @ce_reg_paddr: copy engine register physical address
4008 * @tx_comp_ring_base_paddr: tx comp ring base physical address
4009 * @tx_comp_ring_size: tx comp ring size
4010 * @tx_num_alloc_buffer: number of allocated tx buffer
4011 * @rx_rdy_ring_base_paddr: rx ready ring base physical address
4012 * @rx_rdy_ring_size: rx ready ring size
4013 * @rx_proc_done_idx_paddr: rx process done index physical address
4014 * @rx_proc_done_idx_vaddr: rx process done index virtual address
4015 * @rx2_rdy_ring_base_paddr: rx done ring base physical address
4016 * @rx2_rdy_ring_size: rx done ring size
4017 * @rx2_proc_done_idx_paddr: rx done index physical address
4018 * @rx2_proc_done_idx_vaddr: rx done index virtual address
4019 *
 4020 * The OL client will request IPA UC related resource information.
 4021 * The resource information will be distributed to the IPA module.
4022 * All of the required resources should be pre-allocated
4023 *
4024 * Return: none
4025 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004026void
4027ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004028 struct ol_txrx_ipa_resources *ipa_res)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004029{
4030 htt_ipa_uc_get_resource(pdev->htt_pdev,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004031 &ipa_res->ce_sr_base_paddr,
4032 &ipa_res->ce_sr_ring_size,
4033 &ipa_res->ce_reg_paddr,
4034 &ipa_res->tx_comp_ring_base_paddr,
4035 &ipa_res->tx_comp_ring_size,
4036 &ipa_res->tx_num_alloc_buffer,
4037 &ipa_res->rx_rdy_ring_base_paddr,
4038 &ipa_res->rx_rdy_ring_size,
4039 &ipa_res->rx_proc_done_idx_paddr,
4040 &ipa_res->rx_proc_done_idx_vaddr,
4041 &ipa_res->rx2_rdy_ring_base_paddr,
4042 &ipa_res->rx2_rdy_ring_size,
4043 &ipa_res->rx2_proc_done_idx_paddr,
4044 &ipa_res->rx2_proc_done_idx_vaddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004045}
4046
Leo Chang8e073612015-11-13 10:55:34 -08004047/**
4048 * ol_txrx_ipa_uc_set_doorbell_paddr() - Client set IPA UC doorbell register
4049 * @pdev: handle to the HTT instance
4050 * @ipa_uc_tx_doorbell_paddr: tx comp doorbell physical address
4051 * @ipa_uc_rx_doorbell_paddr: rx ready doorbell physical address
4052 *
 4053 * The IPA UC provides its doorbell register physical addresses;
 4054 * the WLAN firmware will use these addresses to notify the IPA UC.
4055 *
4056 * Return: none
4057 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004058void
4059ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05304060 qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
4061 qdf_dma_addr_t ipa_rx_uc_doorbell_paddr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004062{
4063 htt_ipa_uc_set_doorbell_paddr(pdev->htt_pdev,
4064 ipa_tx_uc_doorbell_paddr,
4065 ipa_rx_uc_doorbell_paddr);
4066}
4067
Leo Chang8e073612015-11-13 10:55:34 -08004068/**
4069 * ol_txrx_ipa_uc_set_active() - Client notify IPA UC data path active or not
4070 * @pdev: handle to the HTT instance
 4071 * @uc_active: whether the given IPA UC data path is becoming active
 4072 * @is_tx: true for the tx data path, false for the rx data path
 4073 *
 4074 * Notify the WLAN firmware whether the IPA UC data path (tx or rx)
 4075 * is active or not.
4076 *
4077 * Return: none
4078 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004079void
4080ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev, bool uc_active, bool is_tx)
4081{
4082 htt_h2t_ipa_uc_set_active(pdev->htt_pdev, uc_active, is_tx);
4083}
4084
4085/**
4086 * ol_txrx_ipa_uc_fw_op_event_handler() - opcode event handler
4087 * @context: pdev context
4088 * @rxpkt: received packet
4089 * @staid: peer id
4090 *
4091 * Return: None
4092 */
4093void ol_txrx_ipa_uc_fw_op_event_handler(void *context,
4094 void *rxpkt,
4095 uint16_t staid)
4096{
4097 ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)context;
4098
Anurag Chouhanc5548422016-02-24 18:33:27 +05304099 if (qdf_unlikely(!pdev)) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304100 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004101 "%s: Invalid context", __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304102 qdf_mem_free(rxpkt);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004103 return;
4104 }
4105
Govind Singh66615292015-12-28 23:07:54 +05304106 if (pdev->ipa_uc_op_cb) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004107 pdev->ipa_uc_op_cb(rxpkt, pdev->osif_dev);
Govind Singh66615292015-12-28 23:07:54 +05304108 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304109 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004110 "%s: ipa_uc_op_cb NULL", __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304111 qdf_mem_free(rxpkt);
Govind Singh66615292015-12-28 23:07:54 +05304112 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004113}
4114
Govind Singh66615292015-12-28 23:07:54 +05304115#ifdef QCA_CONFIG_SMP
Leo Chang8e073612015-11-13 10:55:34 -08004116/**
4117 * ol_txrx_ipa_uc_op_response() - Handle OP command response from firmware
4118 * @pdev: handle to the HTT instance
4119 * @op_msg: op response message from firmware
4120 *
4121 * Return: none
4122 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004123void ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev, uint8_t *op_msg)
4124{
4125 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
4126 struct cds_ol_rx_pkt *pkt;
4127
Anurag Chouhanc5548422016-02-24 18:33:27 +05304128 if (qdf_unlikely(!sched_ctx))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004129 return;
4130
4131 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
Anurag Chouhanc5548422016-02-24 18:33:27 +05304132 if (qdf_unlikely(!pkt)) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304133 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304134 "%s: Not able to allocate context", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004135 return;
4136 }
4137
4138 pkt->callback = (cds_ol_rx_thread_cb) ol_txrx_ipa_uc_fw_op_event_handler;
4139 pkt->context = pdev;
4140 pkt->Rxpkt = (void *)op_msg;
4141 pkt->staId = 0;
4142 cds_indicate_rxpkt(sched_ctx, pkt);
4143}
Govind Singh66615292015-12-28 23:07:54 +05304144#else
4145void ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev,
4146 uint8_t *op_msg)
4147{
4148 if (pdev->ipa_uc_op_cb) {
4149 pdev->ipa_uc_op_cb(op_msg, pdev->osif_dev);
4150 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304151 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Govind Singh66615292015-12-28 23:07:54 +05304152 "%s: IPA callback function is not registered", __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304153 qdf_mem_free(op_msg);
Govind Singh66615292015-12-28 23:07:54 +05304154 return;
4155 }
4156}
4157#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004158
Leo Chang8e073612015-11-13 10:55:34 -08004159/**
4160 * ol_txrx_ipa_uc_register_op_cb() - Register OP handler function
 * @pdev: handle to the txrx pdev instance
4162 * @op_cb: handler function pointer
4163 * @osif_dev: register client context
4164 *
4165 * Return: none
4166 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004167void ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
4168 ipa_uc_op_cb_type op_cb, void *osif_dev)
4169{
4170 pdev->ipa_uc_op_cb = op_cb;
4171 pdev->osif_dev = osif_dev;
4172}
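
/*
 * Usage sketch (illustrative, not part of the driver): a client such as
 * the HDD IPA module would typically register its opcode handler once the
 * txrx pdev is attached, so that firmware uC op-response messages reach it
 * through ol_txrx_ipa_uc_op_response() above. The handler name, its exact
 * signature and the client context below are hypothetical.
 *
 *	static void hdd_ipa_uc_op_cb(uint8_t *op_msg, void *osif_dev)
 *	{
 *		...decode op_msg and drive the IPA uC state machine...
 *	}
 *
 *	ol_txrx_ipa_uc_register_op_cb(pdev, hdd_ipa_uc_op_cb, hdd_ctx);
 */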
4173
Leo Chang8e073612015-11-13 10:55:34 -08004174/**
 * ol_txrx_ipa_uc_get_stat() - request WDI statistics from the firmware
 * @pdev: handle to the txrx pdev instance
4177 *
4178 * Return: none
4179 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004180void ol_txrx_ipa_uc_get_stat(ol_txrx_pdev_handle pdev)
4181{
4182 htt_h2t_ipa_uc_get_stats(pdev->htt_pdev);
4183}
4184#endif /* IPA_UC_OFFLOAD */
4185
Nirav Shahda008342016-05-17 18:50:40 +05304186/**
4187 * ol_txrx_display_stats_help() - print statistics help
4188 *
4189 * Return: none
4190 */
4191static void ol_txrx_display_stats_help(void)
4192{
4193 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4194 "iwpriv wlan0 dumpStats [option] - dump statistics");
4195 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4196 "iwpriv wlan0 clearStats [option] - clear statistics");
4197 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4198 "options:");
4199 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4200 " 1 -- TXRX Layer statistics");
4201 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4202 " 2 -- Bandwidth compute timer stats");
4203 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4204 " 3 -- TSO statistics");
4205 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4206 " 4 -- Network queue statistics");
4207 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4208 " 5 -- Flow control statistics");
4209 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4210 " 6 -- Per Layer statistics");
4211 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4212 " 7 -- Copy engine interrupt statistics");
}
4215
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004216void ol_txrx_display_stats(uint16_t value)
4217{
4218 ol_txrx_pdev_handle pdev;
4219
Anurag Chouhan6d760662016-02-20 16:05:43 +05304220 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004221 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304222 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304223 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004224 return;
4225 }
4226
4227 switch (value) {
4228 case WLAN_TXRX_STATS:
4229 ol_txrx_stats_display(pdev);
4230 break;
4231 case WLAN_TXRX_TSO_STATS:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004232 ol_txrx_stats_display_tso(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004233 break;
4234 case WLAN_DUMP_TX_FLOW_POOL_INFO:
4235 ol_tx_dump_flow_pool_info();
4236 break;
4237 case WLAN_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304238 qdf_nbuf_tx_desc_count_display();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004239 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304240#ifdef CONFIG_HL_SUPPORT
4241 case WLAN_SCHEDULER_STATS:
4242 ol_tx_sched_cur_state_display(pdev);
4243 ol_tx_sched_stats_display(pdev);
4244 break;
4245 case WLAN_TX_QUEUE_STATS:
4246 ol_tx_queue_log_display(pdev);
4247 break;
4248#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
4249 case WLAN_CREDIT_STATS:
4250 ol_tx_dump_group_credit_stats(pdev);
4251 break;
4252#endif
4253
4254#ifdef DEBUG_HL_LOGGING
4255 case WLAN_BUNDLE_STATS:
4256 htt_dump_bundle_stats(pdev->htt_pdev);
4257 break;
4258#endif
4259#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004260 default:
Nirav Shahda008342016-05-17 18:50:40 +05304261 ol_txrx_display_stats_help();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004262 break;
4263 }
4264}
4265
4266void ol_txrx_clear_stats(uint16_t value)
4267{
4268 ol_txrx_pdev_handle pdev;
4269
Anurag Chouhan6d760662016-02-20 16:05:43 +05304270 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004271 if (!pdev) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304272 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304273 "%s: pdev is NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004274 return;
4275 }
4276
4277 switch (value) {
4278 case WLAN_TXRX_STATS:
4279 ol_txrx_stats_clear(pdev);
4280 break;
4281 case WLAN_DUMP_TX_FLOW_POOL_INFO:
4282 ol_tx_clear_flow_pool_stats();
4283 break;
4284 case WLAN_TXRX_DESC_STATS:
Nirav Shahcbc6d722016-03-01 16:24:53 +05304285 qdf_nbuf_tx_desc_count_clear();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004286 break;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304287#ifdef CONFIG_HL_SUPPORT
4288 case WLAN_SCHEDULER_STATS:
4289 ol_tx_sched_stats_clear(pdev);
4290 break;
4291 case WLAN_TX_QUEUE_STATS:
4292 ol_tx_queue_log_clear(pdev);
4293 break;
4294#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
4295 case WLAN_CREDIT_STATS:
4296 ol_tx_clear_group_credit_stats(pdev);
4297 break;
4298#endif
4299 case WLAN_BUNDLE_STATS:
4300 htt_clear_bundle_stats(pdev->htt_pdev);
4301 break;
4302#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004303 default:
Nirav Shahda008342016-05-17 18:50:40 +05304304 ol_txrx_display_stats_help();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004305 break;
4306 }
4307}
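
/*
 * Illustrative call path (assumption about the calling layer, not code in
 * this file): the ioctl handler behind "iwpriv wlan0 dumpStats <option>"
 * is expected to map the option number to one of the WLAN_* stats IDs and
 * then call the two entry points above, e.g.:
 *
 *	ol_txrx_display_stats(WLAN_TXRX_STATS);
 *	ol_txrx_clear_stats(WLAN_TXRX_STATS);	(for the clearStats path)
 */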
4308
4309/**
4310 * ol_rx_data_cb() - data rx callback
 * @pdev: pdev handle
4312 * @buf_list: buffer list
Nirav Shah36a87bf2016-02-22 12:38:46 +05304313 * @staid: Station id
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004314 *
4315 * Return: None
4316 */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304317static void ol_rx_data_cb(struct ol_txrx_pdev_t *pdev,
4318 qdf_nbuf_t buf_list, uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004319{
Nirav Shah36a87bf2016-02-22 12:38:46 +05304320 void *cds_ctx = cds_get_global_context();
Mohit Khanna0696eef2016-04-14 16:14:08 -07004321 void *osif_dev;
Nirav Shahcbc6d722016-03-01 16:24:53 +05304322 qdf_nbuf_t buf, next_buf;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304323 QDF_STATUS ret;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004324 ol_txrx_rx_fp data_rx = NULL;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304325 struct ol_txrx_peer_t *peer;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004326
Nirav Shah36a87bf2016-02-22 12:38:46 +05304327 if (qdf_unlikely(!cds_ctx) || qdf_unlikely(!pdev))
4328 goto free_buf;
4329
4330 /* Do not use peer directly. Derive peer from staid to
4331 * make sure that peer is valid.
4332 */
4333 peer = ol_txrx_peer_find_by_local_id(pdev, staid);
4334 if (!peer)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004335 goto free_buf;
4336
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304337 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atre50141c52016-04-07 13:15:29 -07004338 if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
4339 !peer->vdev->rx)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304340 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004341 goto free_buf;
4342 }
Dhanashri Atre182b0272016-02-17 15:35:07 -08004343
4344 data_rx = peer->vdev->rx;
Mohit Khanna0696eef2016-04-14 16:14:08 -07004345 osif_dev = peer->vdev->osif_dev;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304346 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004347
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304348 qdf_spin_lock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004349 if (!list_empty(&peer->cached_bufq)) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304350 qdf_spin_unlock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004351 /* Flush the cached frames to HDD before passing new rx frame */
4352 ol_txrx_flush_rx_frames(peer, 0);
4353 } else
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304354 qdf_spin_unlock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004355
4356 buf = buf_list;
4357 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304358 next_buf = qdf_nbuf_queue_next(buf);
4359 qdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
Mohit Khanna0696eef2016-04-14 16:14:08 -07004360 ret = data_rx(osif_dev, buf);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304361 if (ret != QDF_STATUS_SUCCESS) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004362 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Frame Rx to HDD failed");
Nirav Shah6a4eee62016-04-25 10:15:04 +05304363 if (pdev)
4364 TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304365 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004366 }
4367 buf = next_buf;
4368 }
4369 return;
4370
4371free_buf:
	TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "%s: Dropping frames", __func__);
4373 buf = buf_list;
4374 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304375 next_buf = qdf_nbuf_queue_next(buf);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304376 if (pdev)
4377 TXRX_STATS_MSDU_INCR(pdev,
4378 rx.dropped_peer_invalid, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304379 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004380 buf = next_buf;
4381 }
4382}
4383
4384/**
4385 * ol_rx_data_process() - process rx frame
4386 * @peer: peer
4387 * @rx_buf_list: rx buffer list
4388 *
4389 * Return: None
4390 */
4391void ol_rx_data_process(struct ol_txrx_peer_t *peer,
Nirav Shahcbc6d722016-03-01 16:24:53 +05304392 qdf_nbuf_t rx_buf_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004393{
	/* The firmware data-path-active response arrives as a T2H message
	 * in softirq (SIRQ) context, and IPA kernel module APIs must not
	 * be called from softirq context, so that work is deferred to the
	 * shim RX thread. */
Nirav Shahcbc6d722016-03-01 16:24:53 +05304397 qdf_nbuf_t buf, next_buf;
Dhanashri Atre182b0272016-02-17 15:35:07 -08004398 ol_txrx_rx_fp data_rx = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304399 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004400
4401 if ((!peer) || (!pdev)) {
4402 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "peer/pdev is NULL");
4403 goto drop_rx_buf;
4404 }
4405
Dhanashri Atre182b0272016-02-17 15:35:07 -08004406 qdf_assert(peer->vdev);
4407
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304408 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004409 if (peer->state >= OL_TXRX_PEER_STATE_CONN)
Dhanashri Atre182b0272016-02-17 15:35:07 -08004410 data_rx = peer->vdev->rx;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304411 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004412
4413 /*
4414 * If there is a data frame from peer before the peer is
4415 * registered for data service, enqueue them on to pending queue
4416 * which will be flushed to HDD once that station is registered.
4417 */
4418 if (!data_rx) {
4419 struct ol_rx_cached_buf *cache_buf;
Manjunathappa Prakash92db7712016-05-27 00:19:34 -07004420
4421 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
4422 "Data on the peer before it is registered!!!");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004423 buf = rx_buf_list;
4424 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304425 next_buf = qdf_nbuf_queue_next(buf);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05304426 cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004427 if (!cache_buf) {
4428 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
4429 "Failed to allocate buf to cache the rx frames");
Nirav Shahcbc6d722016-03-01 16:24:53 +05304430 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004431 } else {
4432 /* Add NULL terminator */
Nirav Shahcbc6d722016-03-01 16:24:53 +05304433 qdf_nbuf_set_next(buf, NULL);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004434 cache_buf->buf = buf;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304435 qdf_spin_lock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004436 list_add_tail(&cache_buf->list,
4437 &peer->cached_bufq);
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304438 qdf_spin_unlock_bh(&peer->bufq_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004439 }
4440 buf = next_buf;
4441 }
4442 } else {
4443#ifdef QCA_CONFIG_SMP
4444 /*
4445 * If the kernel is SMP, schedule rx thread to
4446 * better use multicores.
4447 */
4448 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
Nirav Shah36a87bf2016-02-22 12:38:46 +05304449 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004450 } else {
4451 p_cds_sched_context sched_ctx =
4452 get_cds_sched_ctxt();
4453 struct cds_ol_rx_pkt *pkt;
4454
4455 if (unlikely(!sched_ctx))
4456 goto drop_rx_buf;
4457
4458 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
4459 if (!pkt) {
4460 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304461 "No available Rx message buffer");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004462 goto drop_rx_buf;
4463 }
4464 pkt->callback = (cds_ol_rx_thread_cb)
4465 ol_rx_data_cb;
Nirav Shah36a87bf2016-02-22 12:38:46 +05304466 pkt->context = (void *)pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004467 pkt->Rxpkt = (void *)rx_buf_list;
4468 pkt->staId = peer->local_id;
4469 cds_indicate_rxpkt(sched_ctx, pkt);
4470 }
4471#else /* QCA_CONFIG_SMP */
Nirav Shah36a87bf2016-02-22 12:38:46 +05304472 ol_rx_data_cb(pdev, rx_buf_list, peer->local_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004473#endif /* QCA_CONFIG_SMP */
4474 }
4475
4476 return;
4477
4478drop_rx_buf:
4479 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Dropping rx packets");
4480 buf = rx_buf_list;
4481 while (buf) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05304482 next_buf = qdf_nbuf_queue_next(buf);
Nirav Shah6a4eee62016-04-25 10:15:04 +05304483 if (pdev)
4484 TXRX_STATS_MSDU_INCR(pdev,
4485 rx.dropped_peer_invalid, buf);
Nirav Shahcbc6d722016-03-01 16:24:53 +05304486 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004487 buf = next_buf;
4488 }
4489}
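
/*
 * Frames cached on peer->cached_bufq above are delivered later: they are
 * flushed to HDD by the ol_txrx_flush_rx_frames() call at the end of
 * ol_txrx_register_peer() below, and ol_rx_data_cb() also flushes them
 * before handing any newer frames up, preserving delivery order.
 */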
4490
4491/**
4492 * ol_txrx_register_peer() - register peer
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004493 * @sta_desc: sta descriptor
4494 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05304495 * Return: QDF Status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004496 */
Dhanashri Atre182b0272016-02-17 15:35:07 -08004497QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004498{
4499 struct ol_txrx_peer_t *peer;
Anurag Chouhan6d760662016-02-20 16:05:43 +05304500 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004501 union ol_txrx_peer_update_param_t param;
4502 struct privacy_exemption privacy_filter;
4503
4504 if (!pdev) {
4505 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Pdev is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304506 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004507 }
4508
4509 if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
4510 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid sta id :%d",
4511 sta_desc->sta_id);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304512 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004513 }
4514
4515 peer = ol_txrx_peer_find_by_local_id(pdev, sta_desc->sta_id);
4516 if (!peer)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304517 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004518
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304519 qdf_spin_lock_bh(&peer->peer_info_lock);
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004520 peer->state = OL_TXRX_PEER_STATE_CONN;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05304521 qdf_spin_unlock_bh(&peer->peer_info_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004522
4523 param.qos_capable = sta_desc->is_qos_enabled;
4524 ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
4525 ol_txrx_peer_update_qos_capable);
4526
4527 if (sta_desc->is_wapi_supported) {
4528 /*Privacy filter to accept unencrypted WAI frames */
4529 privacy_filter.ether_type = ETHERTYPE_WAI;
4530 privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
4531 privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
4532 ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
4533 }
4534
4535 ol_txrx_flush_rx_frames(peer, 0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304536 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004537}
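
/*
 * Usage sketch (illustrative, not part of the driver): the station
 * descriptor is filled by the caller, typically HDD, when a station
 * completes association. Field values below are hypothetical.
 *
 *	struct ol_txrx_desc_type sta_desc = {0};
 *
 *	sta_desc.sta_id = sta_id;
 *	sta_desc.is_qos_enabled = true;
 *	sta_desc.is_wapi_supported = false;
 *	if (ol_txrx_register_peer(&sta_desc) != QDF_STATUS_SUCCESS)
 *		...handle registration failure...
 */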
4538
4539/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004540 * ol_txrx_register_ocb_peer - Function to register the OCB peer
4541 * @cds_ctx: Pointer to the global OS context
4542 * @mac_addr: MAC address of the self peer
4543 * @peer_id: Pointer to the peer ID
4544 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304545 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004546 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304547QDF_STATUS ol_txrx_register_ocb_peer(void *cds_ctx, uint8_t *mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004548 uint8_t *peer_id)
4549{
4550 ol_txrx_pdev_handle pdev;
4551 ol_txrx_peer_handle peer;
4552
4553 if (!cds_ctx) {
4554 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Invalid context",
4555 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304556 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004557 }
4558
Anurag Chouhan6d760662016-02-20 16:05:43 +05304559 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004560 if (!pdev) {
4561 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find pdev!",
4562 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304563 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004564 }
4565
4566 peer = ol_txrx_find_peer_by_addr(pdev, mac_addr, peer_id);
4567 if (!peer) {
4568 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find OCB peer!",
4569 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304570 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004571 }
4572
4573 ol_txrx_set_ocb_peer(pdev, peer);
4574
4575 /* Set peer state to connected */
4576 ol_txrx_peer_state_update(pdev, peer->mac_addr.raw,
Dhanashri Atreb08959a2016-03-01 17:28:03 -08004577 OL_TXRX_PEER_STATE_AUTH);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004578
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304579 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004580}
4581
4582/**
4583 * ol_txrx_set_ocb_peer - Function to store the OCB peer
 * @pdev: Handle to the txrx pdev instance
4585 * @peer: Pointer to the peer
4586 */
4587void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
4588 struct ol_txrx_peer_t *peer)
4589{
4590 if (pdev == NULL)
4591 return;
4592
4593 pdev->ocb_peer = peer;
4594 pdev->ocb_peer_valid = (NULL != peer);
4595}
4596
4597/**
4598 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
 * @pdev: Handle to the txrx pdev instance
4600 * @peer: Pointer to the returned peer
4601 *
4602 * Return: true if the peer is valid, false if not
4603 */
4604bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
4605 struct ol_txrx_peer_t **peer)
4606{
4607 int rc;
4608
4609 if ((pdev == NULL) || (peer == NULL)) {
4610 rc = false;
4611 goto exit;
4612 }
4613
4614 if (pdev->ocb_peer_valid) {
4615 *peer = pdev->ocb_peer;
4616 rc = true;
4617 } else {
4618 rc = false;
4619 }
4620
4621exit:
4622 return rc;
4623}
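
/*
 * Note: ol_txrx_set_ocb_peer() and ol_txrx_get_ocb_peer() form a simple
 * single-slot cache of the OCB self peer; a consumer is expected to check
 * the boolean return of the getter before using *peer, e.g. (sketch):
 *
 *	struct ol_txrx_peer_t *ocb_peer;
 *
 *	if (ol_txrx_get_ocb_peer(pdev, &ocb_peer))
 *		...use ocb_peer...
 */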
4624
4625#ifdef QCA_LL_TX_FLOW_CONTROL_V2
4626/**
4627 * ol_txrx_register_pause_cb() - register pause callback
4628 * @pause_cb: pause callback
4629 *
Nirav Shahcbc6d722016-03-01 16:24:53 +05304630 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004631 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304632QDF_STATUS ol_txrx_register_pause_cb(ol_tx_pause_callback_fp pause_cb)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004633{
Anurag Chouhan6d760662016-02-20 16:05:43 +05304634 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004635 if (!pdev || !pause_cb) {
4636 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "pdev or pause_cb is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304637 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004638 }
4639 pdev->pause_cb = pause_cb;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05304640 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004641}
4642#endif
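
/*
 * Usage sketch for the flow-control registration above (illustrative,
 * QCA_LL_TX_FLOW_CONTROL_V2 builds only): the callback name below is
 * hypothetical; its signature is whatever ol_tx_pause_callback_fp defines.
 *
 *	if (ol_txrx_register_pause_cb(wlan_hdd_txrx_pause_cb) !=
 *	    QDF_STATUS_SUCCESS)
 *		...netif queue pause/resume events will not be delivered...
 */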
4643
4644#if defined(FEATURE_LRO)
Dhanashri Atre8d978172015-10-30 15:12:03 -07004645/**
4646 * ol_txrx_lro_flush_handler() - LRO flush handler
4647 * @context: dev handle
4648 * @rxpkt: rx data
4649 * @staid: station id
4650 *
4651 * This function handles an LRO flush indication.
4652 * If the rx thread is enabled, it will be invoked by the rx
4653 * thread else it will be called in the tasklet context
4654 *
4655 * Return: none
4656 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004657void ol_txrx_lro_flush_handler(void *context,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304658 void *rxpkt,
4659 uint16_t staid)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004660{
4661 ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)context;
4662
Anurag Chouhanc5548422016-02-24 18:33:27 +05304663 if (qdf_unlikely(!pdev)) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304664 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304665 "%s: Invalid context", __func__);
Anurag Chouhanc5548422016-02-24 18:33:27 +05304666 qdf_assert(0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004667 return;
4668 }
4669
4670 if (pdev->lro_info.lro_flush_cb)
4671 pdev->lro_info.lro_flush_cb(pdev->lro_info.lro_data);
4672 else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304673 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304674 "%s: lro_flush_cb NULL", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004675}
4676
Dhanashri Atre8d978172015-10-30 15:12:03 -07004677/**
4678 * ol_txrx_lro_flush() - LRO flush callback
4679 * @data: opaque data pointer
4680 *
4681 * This is the callback registered with CE to trigger
4682 * an LRO flush
4683 *
4684 * Return: none
4685 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004686void ol_txrx_lro_flush(void *data)
4687{
4688 p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
4689 struct cds_ol_rx_pkt *pkt;
4690 ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)data;
4691
Anurag Chouhanc5548422016-02-24 18:33:27 +05304692 if (qdf_unlikely(!sched_ctx))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004693 return;
4694
4695 if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
4696 ol_txrx_lro_flush_handler((void *)pdev, NULL, 0);
4697 } else {
4698 pkt = cds_alloc_ol_rx_pkt(sched_ctx);
Anurag Chouhanc5548422016-02-24 18:33:27 +05304699 if (qdf_unlikely(!pkt)) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05304700 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Siddarth Poddarb2011f62016-04-27 20:45:42 +05304701 "%s: Not able to allocate context", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004702 return;
4703 }
4704
Dhanashri Atre8d978172015-10-30 15:12:03 -07004705 pkt->callback =
4706 (cds_ol_rx_thread_cb) ol_txrx_lro_flush_handler;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004707 pkt->context = pdev;
4708 pkt->Rxpkt = NULL;
4709 pkt->staId = 0;
4710 cds_indicate_rxpkt(sched_ctx, pkt);
4711 }
4712}
4713
Dhanashri Atre8d978172015-10-30 15:12:03 -07004714/**
4715 * ol_register_lro_flush_cb() - register the LRO flush callback
4716 * @handler: callback function
4717 * @data: opaque data pointer to be passed back
4718 *
4719 * Store the LRO flush callback provided and in turn
4720 * register OL's LRO flush handler with CE
4721 *
4722 * Return: none
4723 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004724void ol_register_lro_flush_cb(void (handler)(void *), void *data)
4725{
Komal Seelam3d202862016-02-24 18:43:24 +05304726 struct hif_opaque_softc *hif_device =
Anurag Chouhan6d760662016-02-20 16:05:43 +05304727 (struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);
4728 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004729
Manjunathappa Prakashef45aba2016-04-29 11:09:15 -07004730 if (pdev != NULL) {
4731 pdev->lro_info.lro_flush_cb = handler;
4732 pdev->lro_info.lro_data = data;
4733 } else
4734 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: pdev NULL!", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004735
Komal Seelam7fde14c2016-02-02 13:05:57 +05304736 hif_lro_flush_cb_register(hif_device, ol_txrx_lro_flush, pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004737}
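
/*
 * Usage sketch (illustrative, FEATURE_LRO builds only): the LRO manager
 * in the OS shim would register its flush handler so that a copy-engine
 * interrupt can force any aggregated flows out to the network stack. The
 * handler name and context below are hypothetical.
 *
 *	static void hdd_lro_flush(void *lro_mgr)
 *	{
 *		...flush all pending LRO flows in lro_mgr...
 *	}
 *
 *	ol_register_lro_flush_cb(hdd_lro_flush, lro_mgr);
 */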
Dhanashri Atre8d978172015-10-30 15:12:03 -07004738
4739/**
4740 * ol_deregister_lro_flush_cb() - deregister the LRO flush
4741 * callback
4742 *
4743 * Remove the LRO flush callback provided and in turn
4744 * deregister OL's LRO flush handler with CE
4745 *
4746 * Return: none
4747 */
4748void ol_deregister_lro_flush_cb(void)
4749{
Komal Seelam3d202862016-02-24 18:43:24 +05304750 struct hif_opaque_softc *hif_device =
Anurag Chouhan6d760662016-02-20 16:05:43 +05304751 (struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);
4752 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Dhanashri Atre8d978172015-10-30 15:12:03 -07004753
Komal Seelam7fde14c2016-02-02 13:05:57 +05304754 hif_lro_flush_cb_deregister(hif_device);
Dhanashri Atre8d978172015-10-30 15:12:03 -07004755
Manjunathappa Prakashef45aba2016-04-29 11:09:15 -07004756 if (pdev != NULL) {
4757 pdev->lro_info.lro_flush_cb = NULL;
4758 pdev->lro_info.lro_data = NULL;
4759 } else
4760 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: pdev NULL!", __func__);
Dhanashri Atre8d978172015-10-30 15:12:03 -07004761}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004762#endif /* FEATURE_LRO */
Dhanashri Atre12a08392016-02-17 13:10:34 -08004763
4764/**
4765 * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
4766 * @vdev_id: vdev_id
4767 *
4768 * Return: vdev handle
4769 * NULL if not found.
4770 */
4771ol_txrx_vdev_handle ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
4772{
4773 ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
4774 ol_txrx_vdev_handle vdev = NULL;
4775
4776 if (qdf_unlikely(!pdev))
4777 return NULL;
4778
4779 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4780 if (vdev->vdev_id == vdev_id)
4781 break;
4782 }
4783
4784 return vdev;
4785}
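
/*
 * Usage sketch (illustrative): callers that only hold a vdev_id resolve
 * the handle first and must check for NULL, e.g. when enabling WISA mode
 * via the helper that follows:
 *
 *	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
 *
 *	if (!vdev)
 *		return QDF_STATUS_E_INVAL;
 *	return ol_txrx_set_wisa_mode(vdev, true);
 */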
Nirav Shah2e583a02016-04-30 14:06:12 +05304786
4787/**
4788 * ol_txrx_set_wisa_mode() - set wisa mode
4789 * @vdev: vdev handle
4790 * @enable: enable flag
4791 *
4792 * Return: QDF STATUS
4793 */
4794QDF_STATUS ol_txrx_set_wisa_mode(ol_txrx_vdev_handle vdev, bool enable)
4795{
4796 if (!vdev)
4797 return QDF_STATUS_E_INVAL;
4798
4799 vdev->is_wisa_mode_enable = enable;
4800 return QDF_STATUS_SUCCESS;
4801}