blob: 30586bcd13b7d260fb3a5861e1874a611f891763 [file] [log] [blame]
/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
18
19#include <qdf_types.h>
20#include <qdf_lock.h>
Dhanashri Atre14049172016-11-11 18:32:36 -080021#include <qdf_net_types.h>
Dhanashri Atre0da31222017-03-23 12:30:58 -070022#include <qdf_lro.h>
Tallapragada Kalyan4c183b82017-09-13 23:48:14 +053023#include <qdf_module.h>
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +053024#include <hal_hw_headers.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070025#include <hal_api.h>
26#include <hif.h>
27#include <htt.h>
28#include <wdi_event.h>
29#include <queue.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070030#include "dp_types.h"
31#include "dp_internal.h"
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +053032#include "dp_tx.h"
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070033#include "dp_tx_desc.h"
Leo Chang5ea93a42016-11-03 12:39:49 -070034#include "dp_rx.h"
Kai Chen52ef33f2019-03-05 18:33:40 -080035#include "dp_rx_mon.h"
Surya Prakash Raajen3a01bdd2019-02-19 13:19:36 +053036#ifdef DP_RATETABLE_SUPPORT
37#include "dp_ratetable.h"
38#endif
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080039#include <cdp_txrx_handle.h>
Ravi Joshiaf9ace82017-02-17 12:41:48 -080040#include <wlan_cfg.h>
Ishank Jainbc2d91f2017-01-03 18:14:54 +053041#include "cdp_txrx_cmn_struct.h"
Prathyusha Guduri184b6402018-02-04 23:01:49 +053042#include "cdp_txrx_stats_struct.h"
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -070043#include "cdp_txrx_cmn_reg.h"
Dhanashri Atre14049172016-11-11 18:32:36 -080044#include <qdf_util.h>
Ishank Jain1e7401c2017-02-17 15:38:39 +053045#include "dp_peer.h"
Kai Chen6eca1a62017-01-12 10:17:53 -080046#include "dp_rx_mon.h"
Ishank Jain6290a3c2017-03-21 10:49:39 +053047#include "htt_stats.h"
nobeljdebe2b32019-04-23 11:18:47 -070048#include "htt_ppdu_stats.h"
49#include "dp_htt.h"
Keyur Parekhfad6d082017-05-07 08:54:47 -070050#include "qdf_mem.h" /* qdf_mem_malloc,free */
Vivek126db5d2018-07-25 22:05:04 +053051#include "cfg_ucfg_api.h"
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
/**
 * cdp_dump_flow_pool_info() - no-op stand-in used when TX flow control
 *			       v2 (QCA_LL_TX_FLOW_CONTROL_V2) is compiled out
 * @soc: CDP SoC handle (unused)
 *
 * Return: none
 */
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
#endif
Yun Parkfde6b9e2017-06-26 17:13:11 -070061#include "dp_ipa.h"
Ruchi, Agrawal234753c2018-06-28 14:53:37 +053062#include "dp_cal_client_api.h"
Amir Patelcb990262019-05-28 15:12:48 +053063#ifdef FEATURE_WDS
64#include "dp_txrx_wds.h"
65#endif
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070066#ifdef CONFIG_MCL
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -070067extern int con_mode_monitor;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070068#ifndef REMOVE_PKT_LOG
69#include <pktlog_ac_api.h>
70#include <pktlog_ac.h>
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070071#endif
72#endif
Kai Chen52ef33f2019-03-05 18:33:40 -080073
74#ifdef WLAN_RX_PKT_CAPTURE_ENH
75#include "dp_rx_mon_feature.h"
76#else
77/*
78 * dp_config_enh_rx_capture()- API to enable/disable enhanced rx capture
79 * @pdev_handle: DP_PDEV handle
80 * @val: user provided value
81 *
82 * Return: QDF_STATUS
83 */
84static QDF_STATUS
85dp_config_enh_rx_capture(struct cdp_pdev *pdev_handle, int val)
86{
87 return QDF_STATUS_E_INVAL;
88}
89#endif
90
nobeljdebe2b32019-04-23 11:18:47 -070091#ifdef WLAN_TX_PKT_CAPTURE_ENH
92#include "dp_tx_capture.h"
93#else
94/*
95 * dp_config_enh_tx_capture()- API to enable/disable enhanced tx capture
96 * @pdev_handle: DP_PDEV handle
97 * @val: user provided value
98 *
99 * Return: QDF_STATUS
100 */
101static QDF_STATUS
102dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, int val)
103{
104 return QDF_STATUS_E_INVAL;
105}
106#endif
107
Anish Nataraje9d4c3b2018-11-24 22:24:56 +0530108void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
109static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
110static struct dp_soc *
111dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
112 struct ol_if_ops *ol_ops, uint16_t device_id);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -0700113static void dp_pktlogmod_exit(struct dp_pdev *handle);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530114static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
Akshay Kosigi78eced82018-05-14 14:53:48 +0530115 uint8_t *peer_mac_addr,
116 struct cdp_ctrl_objmgr_peer *ctrl_peer);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530117static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +0530118static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
119static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
Krunal Soni03ba0f52019-02-12 11:44:46 -0800120#ifdef ENABLE_VERBOSE_DEBUG
121bool is_dp_verbose_debug_enabled;
122#endif
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -0700123
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +0530124static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
125 enum hal_ring_type ring_type,
126 int ring_num);
Karunakar Dasineni1d891ed2017-03-29 15:42:02 -0700127#define DP_INTR_POLL_TIMER_MS 10
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +0530128/* Generic AST entry aging timer value */
129#define DP_AST_AGING_TIMER_DEFAULT_MS 1000
Ishank Jainbc2d91f2017-01-03 18:14:54 +0530130#define DP_MCS_LENGTH (6*MAX_MCS)
Venkata Sharath Chandra Manchala69a0ed32018-12-12 14:22:11 -0800131
Ishank Jain6290a3c2017-03-21 10:49:39 +0530132#define DP_CURR_FW_STATS_AVAIL 19
133#define DP_HTT_DBG_EXT_STATS_MAX 256
Prathyusha Guduri43bb0562018-02-12 18:30:54 +0530134#define DP_MAX_SLEEP_TIME 100
Krunal Sonid9dea642018-12-18 00:25:03 -0800135#ifndef QCA_WIFI_3_0_EMU
136#define SUSPEND_DRAIN_WAIT 500
137#else
138#define SUSPEND_DRAIN_WAIT 3000
139#endif
Ishank Jain949674c2017-02-27 17:09:29 +0530140
Yun Parkfde6b9e2017-06-26 17:13:11 -0700141#ifdef IPA_OFFLOAD
142/* Exclude IPA rings from the interrupt context */
Yun Park601d0d82017-08-28 21:49:31 -0700143#define TX_RING_MASK_VAL 0xb
Yun Parkfde6b9e2017-06-26 17:13:11 -0700144#define RX_RING_MASK_VAL 0x7
145#else
146#define TX_RING_MASK_VAL 0xF
147#define RX_RING_MASK_VAL 0xF
148#endif
Venkateswara Swamy Bandarued15e74a2017-08-18 19:13:10 +0530149
sumedh baikady72b1c712017-08-24 12:11:46 -0700150#define STR_MAXLEN 64
Soumya Bhat89647ef2017-11-16 17:23:48 +0530151
Vivek126db5d2018-07-25 22:05:04 +0530152#define RNG_ERR "SRNG setup failed for"
Sravan Kumar Kairamebd627e2018-08-28 23:32:52 +0530153
154/* Threshold for peer's cached buf queue beyond which frames are dropped */
155#define DP_RX_CACHED_BUFQ_THRESH 64
156
Ishank Jain949674c2017-02-27 17:09:29 +0530157/**
158 * default_dscp_tid_map - Default DSCP-TID mapping
159 *
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +0530160 * DSCP TID
161 * 000000 0
162 * 001000 1
163 * 010000 2
164 * 011000 3
165 * 100000 4
166 * 101000 5
167 * 110000 6
168 * 111000 7
Ishank Jain949674c2017-02-27 17:09:29 +0530169 */
170static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
171 0, 0, 0, 0, 0, 0, 0, 0,
172 1, 1, 1, 1, 1, 1, 1, 1,
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +0530173 2, 2, 2, 2, 2, 2, 2, 2,
174 3, 3, 3, 3, 3, 3, 3, 3,
175 4, 4, 4, 4, 4, 4, 4, 4,
Ishank Jain949674c2017-02-27 17:09:29 +0530176 5, 5, 5, 5, 5, 5, 5, 5,
177 6, 6, 6, 6, 6, 6, 6, 6,
Pamidipati, Vijayef2cbc62017-09-27 23:09:06 +0530178 7, 7, 7, 7, 7, 7, 7, 7,
Ishank Jain949674c2017-02-27 17:09:29 +0530179};
180
Debasis Dasc39a68d2019-01-28 17:02:06 +0530181/**
182 * default_pcp_tid_map - Default PCP-TID mapping
183 *
184 * PCP TID
185 * 000 0
186 * 001 1
187 * 010 2
188 * 011 3
189 * 100 4
190 * 101 5
191 * 110 6
192 * 111 7
193 */
194static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
195 0, 1, 2, 3, 4, 5, 6, 7,
196};
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530197
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700198/**
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +0530199 * @brief Cpu to tx ring map
200 */
Pamidipati, Vijaya59b5602018-11-08 10:09:03 +0530201#ifdef CONFIG_WIN
nobeljdebe2b32019-04-23 11:18:47 -0700202#ifdef WLAN_TX_PKT_CAPTURE_ENH
203uint8_t
204dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
205 {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
206 {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
207 {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
208 {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
209 {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
210 {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
211};
212#else
Pamidipati, Vijaya59b5602018-11-08 10:09:03 +0530213static uint8_t
Aniruddha Paulc34164e2018-09-14 14:25:30 +0530214dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
Pamidipati, Vijaya59b5602018-11-08 10:09:03 +0530215 {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
216 {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
217 {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
Aniruddha Paulc34164e2018-09-14 14:25:30 +0530218 {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
219 {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +0530220};
nobeljdebe2b32019-04-23 11:18:47 -0700221#endif
Pamidipati, Vijaya59b5602018-11-08 10:09:03 +0530222#else
223static uint8_t
Aniruddha Paulc34164e2018-09-14 14:25:30 +0530224dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
Pamidipati, Vijaya59b5602018-11-08 10:09:03 +0530225 {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
226 {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
227 {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
Aniruddha Paulc34164e2018-09-14 14:25:30 +0530228 {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
229 {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
Pamidipati, Vijaya59b5602018-11-08 10:09:03 +0530230};
231#endif
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +0530232
/**
 * enum dp_stats_type - Select the type of statistics
 * @STATS_FW: index of the firmware-stats column in dp_stats_mapping_table
 * @STATS_HOST: index of the host-stats column in dp_stats_mapping_table
 * @STATS_TYPE_MAX: number of statistics columns
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};
241
/**
 * enum dp_fw_stats - General Firmware statistics options
 * @TXRX_FW_STATS_INVALID: placeholder marking "no firmware stat" cells in
 *			   dp_stats_mapping_table
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID = -1,
};
249
250/**
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530251 * dp_stats_mapping_table - Firmware and Host statistics
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800252 * currently supported
253 */
254const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
Ishank Jain6290a3c2017-03-21 10:49:39 +0530255 {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
256 {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
257 {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
258 {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
259 {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
260 {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
261 {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
262 {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
263 {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
264 {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
265 {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800266 {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
Ishank Jain6290a3c2017-03-21 10:49:39 +0530267 {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
268 {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
269 {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
270 {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
271 {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
272 {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
273 {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
274 /* Last ENUM for HTT FW STATS */
275 {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800276 {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
Ishank Jain6290a3c2017-03-21 10:49:39 +0530277 {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
278 {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
279 {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800280 {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530281 {TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
sumedh baikady72b1c712017-08-24 12:11:46 -0700282 {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
Kai Chen783e0382018-01-25 16:29:08 -0800283 {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -0700284 {TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
Venkata Sharath Chandra Manchalaf167af12018-10-09 20:23:02 -0700285 {TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
286 {TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
Mohit Khannae5a6e942018-11-28 14:22:48 -0800287 {TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800288};
289
Mohit Khannadba82f22018-07-12 10:59:17 -0700290/* MCL specific functions */
291#ifdef CONFIG_MCL
292/**
293 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
294 * @soc: pointer to dp_soc handle
295 * @intr_ctx_num: interrupt context number for which mon mask is needed
296 *
297 * For MCL, monitor mode rings are being processed in timer contexts (polled).
298 * This function is returning 0, since in interrupt mode(softirq based RX),
299 * we donot want to process monitor mode rings in a softirq.
300 *
301 * So, in case packet log is enabled for SAP/STA/P2P modes,
302 * regular interrupt processing will not process monitor mode rings. It would be
303 * done in a separate timer context.
304 *
305 * Return: 0
306 */
307static inline
308uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
309{
310 return 0;
311}
312
313/*
314 * dp_service_mon_rings()- timer to reap monitor rings
315 * reqd as we are not getting ppdu end interrupts
316 * @arg: SoC Handle
317 *
318 * Return:
319 *
320 */
321static void dp_service_mon_rings(void *arg)
322{
323 struct dp_soc *soc = (struct dp_soc *)arg;
324 int ring = 0, work_done, mac_id;
325 struct dp_pdev *pdev = NULL;
326
327 for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
328 pdev = soc->pdev_list[ring];
329 if (!pdev)
330 continue;
331 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
332 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
333 pdev->pdev_id);
334 work_done = dp_mon_process(soc, mac_for_pdev,
335 QCA_NAPI_BUDGET);
336
337 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
338 FL("Reaped %d descs from Monitor rings"),
339 work_done);
340 }
341 }
342
343 qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
344}
345
346#ifndef REMOVE_PKT_LOG
347/**
348 * dp_pkt_log_init() - API to initialize packet log
349 * @ppdev: physical device handle
350 * @scn: HIF context
351 *
352 * Return: none
353 */
354void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
355{
356 struct dp_pdev *handle = (struct dp_pdev *)ppdev;
357
358 if (handle->pkt_log_init) {
359 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
360 "%s: Packet log not initialized", __func__);
361 return;
362 }
363
364 pktlog_sethandle(&handle->pl_dev, scn);
Venkata Sharath Chandra Manchalacad74ad2019-01-28 11:36:47 -0800365 pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
Mohit Khannadba82f22018-07-12 10:59:17 -0700366
367 if (pktlogmod_init(scn)) {
368 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
369 "%s: pktlogmod_init failed", __func__);
370 handle->pkt_log_init = false;
371 } else {
372 handle->pkt_log_init = true;
373 }
374}
375
/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Initializes pktlog for the pdev and attaches the pktlog HTC endpoint.
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
	dp_pkt_log_init(ppdev, scn);
	pktlog_htc_attach();
}
390
391/**
Mohit Khanna16816ae2018-10-30 14:12:03 -0700392 * dp_get_num_rx_contexts() - get number of RX contexts
393 * @soc_hdl: cdp opaque soc handle
394 *
395 * Return: number of RX contexts
396 */
397static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
398{
399 int i;
400 int num_rx_contexts = 0;
401
402 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
403
404 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
405 if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
406 num_rx_contexts++;
407
408 return num_rx_contexts;
409}
410
411/**
Mohit Khannadba82f22018-07-12 10:59:17 -0700412 * dp_pktlogmod_exit() - API to cleanup pktlog info
413 * @handle: Pdev handle
414 *
415 * Return: none
416 */
417static void dp_pktlogmod_exit(struct dp_pdev *handle)
418{
419 void *scn = (void *)handle->soc->hif_handle;
420
421 if (!scn) {
422 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
423 "%s: Invalid hif(scn) handle", __func__);
424 return;
425 }
426
427 pktlogmod_exit(scn);
428 handle->pkt_log_init = false;
429}
430#endif
431#else
432static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
433
434/**
435 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
436 * @soc: pointer to dp_soc handle
437 * @intr_ctx_num: interrupt context number for which mon mask is needed
438 *
439 * Return: mon mask value
440 */
441static inline
442uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
443{
444 return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
445}
446#endif
447
/**
 * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
 * @cdp_opaque_vdev: pointer to cdp_vdev
 *
 * Return: pointer to dp_vdev
 */
static
struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
{
	/* cdp_vdev is the opaque public alias of dp_vdev; just cast */
	return (struct dp_vdev *)cdp_opaque_vdev;
}
459
460
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530461static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
462 struct cdp_peer *peer_hdl,
463 uint8_t *mac_addr,
464 enum cdp_txrx_ast_entry_type type,
465 uint32_t flags)
466{
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530467 return dp_peer_add_ast((struct dp_soc *)soc_hdl,
468 (struct dp_peer *)peer_hdl,
469 mac_addr,
470 type,
471 flags);
472}
473
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530474static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
475 struct cdp_peer *peer_hdl,
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530476 uint8_t *wds_macaddr,
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530477 uint32_t flags)
478{
phadiman0381f562018-06-29 15:40:52 +0530479 int status = -1;
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530480 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530481 struct dp_ast_entry *ast_entry = NULL;
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530482 struct dp_peer *peer = (struct dp_peer *)peer_hdl;
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530483
484 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530485 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
486 peer->vdev->pdev->pdev_id);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530487
phadiman0381f562018-06-29 15:40:52 +0530488 if (ast_entry) {
489 status = dp_peer_update_ast(soc,
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530490 peer,
491 ast_entry, flags);
phadiman0381f562018-06-29 15:40:52 +0530492 }
493
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530494 qdf_spin_unlock_bh(&soc->ast_lock);
495
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530496 return status;
497}
498
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530499/*
500 * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530501 * @soc_handle: Datapath SOC handle
502 * @wds_macaddr: WDS entry MAC Address
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530503 * Return: None
504 */
505static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
Chaithanya Garrepalli267ae0e2019-02-19 23:45:12 +0530506 uint8_t *wds_macaddr,
507 uint8_t *peer_mac_addr,
508 void *vdev_handle)
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530509{
510 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
511 struct dp_ast_entry *ast_entry = NULL;
Chaithanya Garrepalli267ae0e2019-02-19 23:45:12 +0530512 struct dp_ast_entry *tmp_ast_entry;
513 struct dp_peer *peer;
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530514 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Chaithanya Garrepalli267ae0e2019-02-19 23:45:12 +0530515 struct dp_pdev *pdev;
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530516
Chaithanya Garrepalli267ae0e2019-02-19 23:45:12 +0530517 if (!vdev)
518 return;
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530519
Chaithanya Garrepalli267ae0e2019-02-19 23:45:12 +0530520 pdev = vdev->pdev;
521
522 if (peer_mac_addr) {
523 peer = dp_peer_find_hash_find(soc, peer_mac_addr,
524 0, vdev->vdev_id);
525 if (!peer)
526 return;
527 qdf_spin_lock_bh(&soc->ast_lock);
528 DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
529 if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
530 (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
531 dp_peer_del_ast(soc, ast_entry);
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530532 }
Chaithanya Garrepalli267ae0e2019-02-19 23:45:12 +0530533 qdf_spin_unlock_bh(&soc->ast_lock);
534 dp_peer_unref_delete(peer);
phadiman0381f562018-06-29 15:40:52 +0530535
Chaithanya Garrepalli267ae0e2019-02-19 23:45:12 +0530536 } else if (wds_macaddr) {
537 qdf_spin_lock_bh(&soc->ast_lock);
538 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
539 pdev->pdev_id);
540
541 if (ast_entry) {
542 if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
543 (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
544 dp_peer_del_ast(soc, ast_entry);
545 }
546 qdf_spin_unlock_bh(&soc->ast_lock);
547 }
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530548}
549
550/*
551 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530552 * @soc: Datapath SOC handle
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530553 *
554 * Return: None
555 */
Pamidipati, Vijay3756b762018-05-12 11:10:37 +0530556static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
Santosh Anbu76693bc2018-04-23 16:38:54 +0530557 void *vdev_hdl)
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530558{
559 struct dp_soc *soc = (struct dp_soc *) soc_hdl;
560 struct dp_pdev *pdev;
561 struct dp_vdev *vdev;
562 struct dp_peer *peer;
563 struct dp_ast_entry *ase, *temp_ase;
564 int i;
565
566 qdf_spin_lock_bh(&soc->ast_lock);
567
568 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
569 pdev = soc->pdev_list[i];
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530570 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530571 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
572 DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
573 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
Chaithanya Garrepallia822b192018-08-07 20:41:41 +0530574 if ((ase->type ==
Chaithanya Garrepalli267ae0e2019-02-19 23:45:12 +0530575 CDP_TXRX_AST_TYPE_WDS_HM) ||
576 (ase->type ==
577 CDP_TXRX_AST_TYPE_WDS_HM_SEC))
578 dp_peer_del_ast(soc, ase);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530579 }
580 }
581 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530582 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530583 }
584
585 qdf_spin_unlock_bh(&soc->ast_lock);
586}
587
588/*
589 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
590 * @soc: Datapath SOC handle
591 *
592 * Return: None
593 */
594static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
595{
596 struct dp_soc *soc = (struct dp_soc *) soc_hdl;
597 struct dp_pdev *pdev;
598 struct dp_vdev *vdev;
599 struct dp_peer *peer;
600 struct dp_ast_entry *ase, *temp_ase;
601 int i;
602
603 qdf_spin_lock_bh(&soc->ast_lock);
604
605 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
606 pdev = soc->pdev_list[i];
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530607 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530608 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
609 DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
610 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
Chaithanya Garrepallia822b192018-08-07 20:41:41 +0530611 if ((ase->type ==
Radha krishna Simha Jiguru27340792018-09-06 15:08:12 +0530612 CDP_TXRX_AST_TYPE_STATIC) ||
613 (ase->type ==
614 CDP_TXRX_AST_TYPE_SELF) ||
615 (ase->type ==
616 CDP_TXRX_AST_TYPE_STA_BSS))
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530617 continue;
618 dp_peer_del_ast(soc, ase);
619 }
620 }
621 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +0530622 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530623 }
624
625 qdf_spin_unlock_bh(&soc->ast_lock);
626}
627
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530628/**
629 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
630 * and return ast entry information
631 * of first ast entry found in the
632 * table with given mac address
633 *
634 * @soc : data path soc handle
635 * @ast_mac_addr : AST entry mac address
636 * @ast_entry_info : ast entry information
637 *
638 * return : true if ast entry found with ast_mac_addr
639 * false if ast entry not found
640 */
641static bool dp_peer_get_ast_info_by_soc_wifi3
642 (struct cdp_soc_t *soc_hdl,
643 uint8_t *ast_mac_addr,
644 struct cdp_ast_entry_info *ast_entry_info)
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530645{
Amir Patelcb990262019-05-28 15:12:48 +0530646 struct dp_ast_entry *ast_entry = NULL;
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530647 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530648
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530649 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530650
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530651 ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
Chaithanya Garrepalli4fd2fe42019-02-19 23:48:21 +0530652 if (!ast_entry || !ast_entry->peer) {
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530653 qdf_spin_unlock_bh(&soc->ast_lock);
Chaithanya Garrepalli4fd2fe42019-02-19 23:48:21 +0530654 return false;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530655 }
Chaithanya Garrepalli4fd2fe42019-02-19 23:48:21 +0530656 if (ast_entry->delete_in_progress && !ast_entry->callback) {
657 qdf_spin_unlock_bh(&soc->ast_lock);
658 return false;
659 }
660 ast_entry_info->type = ast_entry->type;
661 ast_entry_info->pdev_id = ast_entry->pdev_id;
662 ast_entry_info->vdev_id = ast_entry->vdev_id;
663 ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
664 qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
665 &ast_entry->peer->mac_addr.raw[0],
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -0800666 QDF_MAC_ADDR_SIZE);
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530667 qdf_spin_unlock_bh(&soc->ast_lock);
Chaithanya Garrepalli4fd2fe42019-02-19 23:48:21 +0530668 return true;
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530669}
670
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530671/**
672 * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
673 * and return ast entry information
674 * if mac address and pdev_id matches
675 *
676 * @soc : data path soc handle
677 * @ast_mac_addr : AST entry mac address
678 * @pdev_id : pdev_id
679 * @ast_entry_info : ast entry information
680 *
681 * return : true if ast entry found with ast_mac_addr
682 * false if ast entry not found
683 */
684static bool dp_peer_get_ast_info_by_pdevid_wifi3
685 (struct cdp_soc_t *soc_hdl,
686 uint8_t *ast_mac_addr,
687 uint8_t pdev_id,
688 struct cdp_ast_entry_info *ast_entry_info)
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530689{
690 struct dp_ast_entry *ast_entry;
691 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
692
693 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530694
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530695 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530696
Chaithanya Garrepalli4fd2fe42019-02-19 23:48:21 +0530697 if (!ast_entry || !ast_entry->peer) {
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530698 qdf_spin_unlock_bh(&soc->ast_lock);
Chaithanya Garrepalli4fd2fe42019-02-19 23:48:21 +0530699 return false;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530700 }
Chaithanya Garrepalli4fd2fe42019-02-19 23:48:21 +0530701 if (ast_entry->delete_in_progress && !ast_entry->callback) {
702 qdf_spin_unlock_bh(&soc->ast_lock);
703 return false;
704 }
705 ast_entry_info->type = ast_entry->type;
706 ast_entry_info->pdev_id = ast_entry->pdev_id;
707 ast_entry_info->vdev_id = ast_entry->vdev_id;
708 ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
709 qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
710 &ast_entry->peer->mac_addr.raw[0],
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -0800711 QDF_MAC_ADDR_SIZE);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530712 qdf_spin_unlock_bh(&soc->ast_lock);
Chaithanya Garrepalli4fd2fe42019-02-19 23:48:21 +0530713 return true;
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530714}
715
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530716/**
717 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
718 * with given mac address
719 *
720 * @soc : data path soc handle
721 * @ast_mac_addr : AST entry mac address
722 * @callback : callback function to called on ast delete response from FW
723 * @cookie : argument to be passed to callback
724 *
725 * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
726 * is sent
727 * QDF_STATUS_E_INVAL false if ast entry not found
728 */
729static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
730 uint8_t *mac_addr,
731 txrx_ast_free_cb callback,
732 void *cookie)
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530733
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530734{
735 struct dp_soc *soc = (struct dp_soc *)soc_handle;
Amir Patelcb990262019-05-28 15:12:48 +0530736 struct dp_ast_entry *ast_entry = NULL;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530737 txrx_ast_free_cb cb = NULL;
738 void *arg = NULL;
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530739
740 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530741 ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
742 if (!ast_entry) {
743 qdf_spin_unlock_bh(&soc->ast_lock);
744 return -QDF_STATUS_E_INVAL;
745 }
746
747 if (ast_entry->callback) {
748 cb = ast_entry->callback;
749 arg = ast_entry->cookie;
750 }
751
752 ast_entry->callback = callback;
753 ast_entry->cookie = cookie;
754
755 /*
756 * if delete_in_progress is set AST delete is sent to target
757 * and host is waiting for response should not send delete
758 * again
759 */
760 if (!ast_entry->delete_in_progress)
761 dp_peer_del_ast(soc, ast_entry);
762
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530763 qdf_spin_unlock_bh(&soc->ast_lock);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530764 if (cb) {
765 cb(soc->ctrl_psoc,
766 soc,
767 arg,
768 CDP_TXRX_AST_DELETE_IN_PROGRESS);
769 }
770 return QDF_STATUS_SUCCESS;
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530771}
772
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530773/**
774 * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
775 * table if mac address and pdev_id matches
776 *
777 * @soc : data path soc handle
778 * @ast_mac_addr : AST entry mac address
779 * @pdev_id : pdev id
780 * @callback : callback function to called on ast delete response from FW
781 * @cookie : argument to be passed to callback
782 *
783 * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
784 * is sent
785 * QDF_STATUS_E_INVAL false if ast entry not found
786 */
787
788static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
789 uint8_t *mac_addr,
790 uint8_t pdev_id,
791 txrx_ast_free_cb callback,
792 void *cookie)
793
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530794{
795 struct dp_soc *soc = (struct dp_soc *)soc_handle;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530796 struct dp_ast_entry *ast_entry;
797 txrx_ast_free_cb cb = NULL;
798 void *arg = NULL;
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530799
800 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530801 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
802
803 if (!ast_entry) {
804 qdf_spin_unlock_bh(&soc->ast_lock);
805 return -QDF_STATUS_E_INVAL;
806 }
807
808 if (ast_entry->callback) {
809 cb = ast_entry->callback;
810 arg = ast_entry->cookie;
811 }
812
813 ast_entry->callback = callback;
814 ast_entry->cookie = cookie;
815
816 /*
817 * if delete_in_progress is set AST delete is sent to target
818 * and host is waiting for response should not sent delete
819 * again
820 */
821 if (!ast_entry->delete_in_progress)
822 dp_peer_del_ast(soc, ast_entry);
823
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530824 qdf_spin_unlock_bh(&soc->ast_lock);
825
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530826 if (cb) {
827 cb(soc->ctrl_psoc,
828 soc,
829 arg,
830 CDP_TXRX_AST_DELETE_IN_PROGRESS);
831 }
832 return QDF_STATUS_SUCCESS;
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530833}
834
Houston Hoffman648a9182017-05-21 23:27:50 -0700835/**
836 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
837 * @ring_num: ring num of the ring being queried
838 * @grp_mask: the grp_mask array for the ring type in question.
839 *
840 * The grp_mask array is indexed by group number and the bit fields correspond
841 * to ring numbers. We are finding which interrupt group a ring belongs to.
842 *
843 * Return: the index in the grp_mask array with the ring number.
844 * -QDF_STATUS_E_NOENT if no entry is found
845 */
846static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
847{
848 int ext_group_num;
849 int mask = 1 << ring_num;
850
851 for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
852 ext_group_num++) {
853 if (mask & grp_mask[ext_group_num])
854 return ext_group_num;
855 }
856
857 return -QDF_STATUS_E_NOENT;
858}
859
/**
 * dp_srng_calculate_msi_group() - map a (ring_type, ring_num) pair to the
 * MSI interrupt group that services it
 * @soc: DP SOC handle; wlan_cfg_ctx holds the per-group ring masks
 * @ring_type: HAL ring type being configured
 * @ring_num: ring number within that type
 *
 * Selects the grp_mask array configured for @ring_type and looks up the
 * interrupt group servicing @ring_num. Ring types that are not
 * interrupt-driven here (SW-to-HW rings, CE rings handled by hif, etc.)
 * return -QDF_STATUS_E_NOENT.
 *
 * Return: interrupt group index, or -QDF_STATUS_E_NOENT when the ring is
 * not part of any ext_group.
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num)
{
	int *grp_mask;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		/* dp_tx_comp_handler - soc->tx_comp_ring */
		if (ring_num < 3)
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
		else if (ring_num == 3) {
			/* sw treats this as a separate ring type */
			grp_mask = &soc->wlan_cfg_ctx->
				int_rx_wbm_rel_ring_mask[0];
			/* rel ring mask is indexed from 0, not ring_num 3 */
			ring_num = 0;
		} else {
			qdf_assert(0);
			return -QDF_STATUS_E_NOENT;
		}
		break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
		break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
		break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
		break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
		break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
		break;

	case RXDMA_MONITOR_BUF:
		/* TODO: support low_thresh interrupt */
		return -QDF_STATUS_E_NOENT;
		break;

	case TCL_DATA:
	case TCL_CMD:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
		break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
		break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
		break;
	}

	/* Resolve the group index within the selected mask array */
	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
946
947static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
948 *ring_params, int ring_type, int ring_num)
949{
950 int msi_group_number;
951 int msi_data_count;
952 int ret;
953 uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
954
955 ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
956 &msi_data_count, &msi_data_start,
957 &msi_irq_start);
958
959 if (ret)
960 return;
961
962 msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
963 ring_num);
964 if (msi_group_number < 0) {
Houston Hoffman41b912c2017-08-30 14:27:51 -0700965 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
Houston Hoffman648a9182017-05-21 23:27:50 -0700966 FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
967 ring_type, ring_num);
968 ring_params->msi_addr = 0;
969 ring_params->msi_data = 0;
970 return;
971 }
972
973 if (msi_group_number > msi_data_count) {
974 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
975 FL("2 msi_groups will share an msi; msi_group_num %d"),
976 msi_group_number);
977
978 QDF_ASSERT(0);
979 }
980
981 pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
982
983 ring_params->msi_addr = addr_low;
984 ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
985 ring_params->msi_data = (msi_group_number % msi_data_count)
986 + msi_data_start;
987 ring_params->flags |= HAL_SRNG_MSI_INTR;
988}
989
/**
 * dp_print_ast_stats() - Dump AST table contents
 * @soc: Datapath soc handle
 *
 * Prints global AST add/delete/age-out counters, then walks every
 * pdev -> vdev -> peer -> AST-entry chain under ast_lock and dumps each
 * entry's fields.
 *
 * return void
 */
#ifdef FEATURE_AST
void dp_print_ast_stats(struct dp_soc *soc)
{
	uint8_t i;
	/* NOTE(review): num_entries is u8 and wraps after 255 entries —
	 * confirm the table is expected to stay small.
	 */
	uint8_t num_entries = 0;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *tmp_ase;
	/* Human-readable names indexed by ase->type (cdp AST type enum) */
	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
			"DA", "HMWDS_SEC"};

	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("AST Table:");

	/* Hold ast_lock across the whole walk so entries cannot be freed
	 * while they are being printed.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
					DP_PRINT_STATS("%6d mac_addr = %pM",
						       ++num_entries,
						       ase->mac_addr.raw);
					DP_PRINT_STATS(" peer_mac_addr = %pM",
						       ase->peer->mac_addr.raw);
					DP_PRINT_STATS(" peer_id = %u",
						       ase->peer->peer_ids[0]);
					DP_PRINT_STATS(" type = %s",
						       type[ase->type]);
					DP_PRINT_STATS(" next_hop = %d",
						       ase->next_hop);
					DP_PRINT_STATS(" is_active = %d",
						       ase->is_active);
					DP_PRINT_STATS(" is_bss = %d",
						       ase->is_bss);
					DP_PRINT_STATS(" ast_idx = %d",
						       ase->ast_idx);
					DP_PRINT_STATS(" ast_hash = %d",
						       ase->ast_hash_value);
					DP_PRINT_STATS("delete_in_progress= %d",
						       ase->delete_in_progress
						       );
					DP_PRINT_STATS(" pdev_id = %d",
						       ase->pdev_id);
					DP_PRINT_STATS(" vdev_id = %d",
						       ase->vdev_id);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
/* Stub when AST tracking is compiled out */
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
	return;
}
#endif
1062
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05301063/**
1064 * dp_print_peer_table() - Dump all Peer stats
1065 * @vdev: Datapath Vdev handle
1066 *
1067 * return void
1068 */
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05301069static void dp_print_peer_table(struct dp_vdev *vdev)
1070{
1071 struct dp_peer *peer = NULL;
1072
1073 DP_PRINT_STATS("Dumping Peer Table Stats:");
1074 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1075 if (!peer) {
1076 DP_PRINT_STATS("Invalid Peer");
1077 return;
1078 }
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301079 DP_PRINT_STATS(" peer_mac_addr = %pM"
1080 " nawds_enabled = %d"
1081 " bss_peer = %d"
1082 " wapi = %d"
1083 " wds_enabled = %d"
1084 " delete in progress = %d"
1085 " peer id = %d",
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05301086 peer->mac_addr.raw,
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301087 peer->nawds_enabled,
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05301088 peer->bss_peer,
1089 peer->wapi,
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301090 peer->wds_enabled,
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05301091 peer->delete_in_progress,
1092 peer->peer_ids[0]);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05301093 }
1094}
1095
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05301096/*
Karunakar Dasineni1cd51dd2019-05-07 16:01:59 -07001097 * dp_srng_mem_alloc() - Allocate memory for SRNG
1098 * @soc : Data path soc handle
1099 * @srng : SRNG pointer
1100 * @align : Align size
1101 *
1102 * return: QDF_STATUS_SUCCESS on successful allocation
1103 * QDF_STATUS_E_NOMEM on failure
1104 */
1105static QDF_STATUS
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05301106dp_srng_mem_alloc(struct dp_soc *soc, struct dp_srng *srng, uint32_t align,
1107 bool cached)
Karunakar Dasineni1cd51dd2019-05-07 16:01:59 -07001108{
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05301109 uint32_t align_alloc_size;
1110
1111 if (!cached) {
1112 srng->base_vaddr_unaligned =
1113 qdf_mem_alloc_consistent(soc->osdev,
1114 soc->osdev->dev,
1115 srng->alloc_size,
1116 &srng->base_paddr_unaligned);
1117 } else {
1118 srng->base_vaddr_unaligned = qdf_mem_malloc(srng->alloc_size);
1119 srng->base_paddr_unaligned =
1120 qdf_mem_virt_to_phys(srng->base_vaddr_unaligned);
1121 }
1122
Karunakar Dasineni1cd51dd2019-05-07 16:01:59 -07001123 if (!srng->base_vaddr_unaligned) {
1124 return QDF_STATUS_E_NOMEM;
1125 }
1126
1127 /* Re-allocate additional bytes to align base address only if
1128 * above allocation returns unaligned address. Reason for
1129 * trying exact size allocation above is, OS tries to allocate
1130 * blocks of size power-of-2 pages and then free extra pages.
1131 * e.g., of a ring size of 1MB, the allocation below will
1132 * request 1MB plus 7 bytes for alignment, which will cause a
1133 * 2MB block allocation,and that is failing sometimes due to
1134 * memory fragmentation.
1135 * dp_srng_mem_alloc should be replaced with
1136 * qdf_aligned_mem_alloc_consistent after fixing some known
1137 * shortcomings with this QDF function
1138 */
1139 if ((unsigned long)(srng->base_paddr_unaligned) &
1140 (align - 1)) {
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05301141 align_alloc_size = srng->alloc_size + align - 1;
1142
1143 if (!cached) {
1144 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1145 srng->alloc_size,
1146 srng->base_vaddr_unaligned,
1147 srng->base_paddr_unaligned, 0);
1148
1149 srng->base_vaddr_unaligned =
1150 qdf_mem_alloc_consistent(soc->osdev,
1151 soc->osdev->dev,
1152 align_alloc_size,
1153 &srng->base_paddr_unaligned);
1154
1155 } else {
1156 qdf_mem_free(srng->base_vaddr_unaligned);
1157 srng->base_vaddr_unaligned =
1158 qdf_mem_malloc(align_alloc_size);
1159
1160 srng->base_paddr_unaligned =
1161 qdf_mem_virt_to_phys(srng->base_vaddr_unaligned);
1162 }
1163
1164 srng->alloc_size = align_alloc_size;
Karunakar Dasineni1cd51dd2019-05-07 16:01:59 -07001165
1166 if (!srng->base_vaddr_unaligned) {
1167 return QDF_STATUS_E_NOMEM;
1168 }
1169 }
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05301170
Karunakar Dasineni1cd51dd2019-05-07 16:01:59 -07001171 return QDF_STATUS_SUCCESS;
1172}
1173
1174
1175/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001176 * dp_setup_srng - Internal function to setup SRNG rings used by data path
1177 */
1178static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05301179 int ring_type, int ring_num, int mac_id,
1180 uint32_t num_entries, bool cached)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001181{
1182 void *hal_soc = soc->hal_soc;
1183 uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
1184 /* TODO: See if we should get align size from hal */
1185 uint32_t ring_base_align = 8;
1186 struct hal_srng_params ring_params;
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -08001187 uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001188
Houston Hoffman648a9182017-05-21 23:27:50 -07001189 /* TODO: Currently hal layer takes care of endianness related settings.
1190 * See if these settings need to passed from DP layer
1191 */
1192 ring_params.flags = 0;
1193
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -08001194 num_entries = (num_entries > max_entries) ? max_entries : num_entries;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001195 srng->hal_srng = NULL;
Karunakar Dasineni1cd51dd2019-05-07 16:01:59 -07001196 srng->alloc_size = num_entries * entry_size;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001197 srng->num_entries = num_entries;
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05301198
phadimana1f79822019-02-15 15:02:37 +05301199 if (!dp_is_soc_reinit(soc)) {
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05301200 if (dp_srng_mem_alloc(soc, srng, ring_base_align, cached) !=
Karunakar Dasineni1cd51dd2019-05-07 16:01:59 -07001201 QDF_STATUS_SUCCESS) {
1202 dp_err("alloc failed - ring_type: %d, ring_num %d",
1203 ring_type, ring_num);
1204 return QDF_STATUS_E_NOMEM;
1205 }
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05301206
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05301207 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001208
Karunakar Dasineni1cd51dd2019-05-07 16:01:59 -07001209 ring_params.ring_base_paddr =
1210 (qdf_dma_addr_t)qdf_align(
1211 (unsigned long)(srng->base_paddr_unaligned),
1212 ring_base_align);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001213
Karunakar Dasineni1cd51dd2019-05-07 16:01:59 -07001214 ring_params.ring_base_vaddr =
1215 (void *)((unsigned long)(srng->base_vaddr_unaligned) +
1216 ((unsigned long)(ring_params.ring_base_paddr) -
1217 (unsigned long)(srng->base_paddr_unaligned)));
1218
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001219 ring_params.num_entries = num_entries;
1220
Krunal Sonic96a1162019-02-21 11:33:26 -08001221 dp_verbose_debug("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
1222 ring_type, ring_num,
1223 (void *)ring_params.ring_base_vaddr,
1224 (void *)ring_params.ring_base_paddr,
1225 ring_params.num_entries);
Mohit Khanna81179cb2018-08-16 20:50:43 -07001226
psimhac983d7e2017-07-26 15:20:07 -07001227 if (soc->intr_mode == DP_INTR_MSI) {
1228 dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
Krunal Sonic96a1162019-02-21 11:33:26 -08001229 dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
1230 ring_type, ring_num);
psimhac983d7e2017-07-26 15:20:07 -07001231
1232 } else {
1233 ring_params.msi_data = 0;
1234 ring_params.msi_addr = 0;
Krunal Sonic96a1162019-02-21 11:33:26 -08001235 dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
1236 ring_type, ring_num);
psimhac983d7e2017-07-26 15:20:07 -07001237 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001238
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +05301239 /*
1240 * Setup interrupt timer and batch counter thresholds for
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001241 * interrupt mitigation based on ring type
1242 */
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +05301243 if (ring_type == REO_DST) {
1244 ring_params.intr_timer_thres_us =
1245 wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1246 ring_params.intr_batch_cntr_thres_entries =
1247 wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1248 } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1249 ring_params.intr_timer_thres_us =
1250 wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1251 ring_params.intr_batch_cntr_thres_entries =
1252 wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1253 } else {
1254 ring_params.intr_timer_thres_us =
1255 wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1256 ring_params.intr_batch_cntr_thres_entries =
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001257 wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +05301258 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001259
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001260 /* Enable low threshold interrupts for rx buffer rings (regular and
1261 * monitor buffer rings.
1262 * TODO: See if this is required for any other ring
1263 */
Karunakar Dasineni37995ac2018-02-06 12:37:30 -08001264 if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1265 (ring_type == RXDMA_MONITOR_STATUS)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001266 /* TODO: Setting low threshold to 1/8th of ring size
1267 * see if this needs to be configurable
1268 */
1269 ring_params.low_threshold = num_entries >> 3;
1270 ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
Karunakar Dasinenibef3b1b2018-03-28 22:23:57 -07001271 ring_params.intr_timer_thres_us =
1272 wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1273 ring_params.intr_batch_cntr_thres_entries = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001274 }
1275
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05301276 if (cached) {
1277 ring_params.flags |= HAL_SRNG_CACHED_DESC;
1278 srng->cached = 1;
1279 }
1280
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001281 srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
Dhanashri Atred4032ab2017-01-17 15:05:41 -08001282 mac_id, &ring_params);
Manoj Ekbote376116e2017-12-19 10:44:41 -08001283
1284 if (!srng->hal_srng) {
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05301285 if (cached) {
1286 qdf_mem_free(srng->base_vaddr_unaligned);
1287 } else {
1288 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1289 srng->alloc_size,
1290 srng->base_vaddr_unaligned,
1291 srng->base_paddr_unaligned, 0);
1292 }
Manoj Ekbote376116e2017-12-19 10:44:41 -08001293 }
1294
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001295 return 0;
1296}
1297
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05301298/*
1299 * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
1300 * @soc: DP SOC handle
1301 * @srng: source ring structure
1302 * @ring_type: type of ring
1303 * @ring_num: ring number
1304 *
1305 * Return: None
1306 */
1307static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
1308 int ring_type, int ring_num)
1309{
Vinay Adellaa06e8c82018-12-14 20:31:49 +05301310 if (!srng->hal_srng) {
1311 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1312 FL("Ring type: %d, num:%d not setup"),
1313 ring_type, ring_num);
1314 return;
1315 }
1316
1317 hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1318 srng->hal_srng = NULL;
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05301319}
1320
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001321/**
1322 * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
1323 * Any buffers allocated and attached to ring entries are expected to be freed
1324 * before calling this function.
1325 */
1326static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
1327 int ring_type, int ring_num)
1328{
phadimana1f79822019-02-15 15:02:37 +05301329 if (!dp_is_soc_reinit(soc)) {
Vinay Adellaa06e8c82018-12-14 20:31:49 +05301330 if (!srng->hal_srng && (srng->alloc_size == 0)) {
1331 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1332 FL("Ring type: %d, num:%d not setup"),
1333 ring_type, ring_num);
1334 return;
1335 }
1336
1337 if (srng->hal_srng) {
1338 hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1339 srng->hal_srng = NULL;
1340 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001341 }
1342
Vinay Adellaa06e8c82018-12-14 20:31:49 +05301343 if (srng->alloc_size) {
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05301344 if (!srng->cached) {
1345 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1346 srng->alloc_size,
1347 srng->base_vaddr_unaligned,
1348 srng->base_paddr_unaligned, 0);
1349 } else {
1350 qdf_mem_free(srng->base_vaddr_unaligned);
1351 }
Vinay Adellaa06e8c82018-12-14 20:31:49 +05301352 srng->alloc_size = 0;
1353 }
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05301354 srng->hal_srng = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001355}
1356
1357/* TODO: Need this interface from HIF */
1358void *hif_get_hal_handle(void *hif_handle);
1359
/*
 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
 * @dp_ctx: DP SOC handle
 * @budget: Number of frames/descriptors that can be processed in one shot
 *
 * Services, in priority order, every ring whose bit is set in this
 * interrupt context's masks: Tx completions, REO exception, Rx WBM
 * release, REO destination (Rx), REO status, and per-pdev LMAC rings
 * (monitor, rxdma2host error, host2rxdma refill). The remaining budget
 * is threaded through each stage and processing stops as soon as it is
 * exhausted.
 *
 * Return: remaining budget/quota for the soc device
 */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	uint32_t work_done = 0;
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;
	struct dp_pdev *pdev = NULL;
	int mac_id;

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	while (tx_mask) {
		if (tx_mask & 0x1) {
			work_done = dp_tx_comp_handler(int_ctx,
						       soc,
						       soc->tx_comp_ring[ring].hal_srng,
						       remaining_quota);

			if (work_done) {
				intr_stats->num_tx_ring_masks[ring]++;
				dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
						 tx_mask, ring, budget,
						 work_done);
			}

			budget -= work_done;
			if (budget <= 0)
				goto budget_done;

			remaining_quota = budget;
		}
		/* Advance to the next Tx completion ring in the mask */
		tx_mask = tx_mask >> 1;
		ring++;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -= work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(soc,
						  soc->rx_rel_ring.hal_srng, remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -= work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = dp_rx_process(int_ctx,
					soc->reo_dest_ring[ring].hal_srng,
					ring,
					remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	/* REO status ring is not budgeted; just count serviced masks */
	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* Process LMAC interrupts */
	for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		if (!pdev)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								  pdev->pdev_id);
			/* Monitor destination/status processing */
			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
				work_done = dp_mon_process(soc, mac_for_pdev,
							   remaining_quota);
				if (work_done)
					intr_stats->num_rx_mon_ring_masks++;
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			/* RXDMA-to-host error ring processing */
			if (int_ctx->rxdma2host_ring_mask &
			    (1 << mac_for_pdev)) {
				work_done = dp_rxdma_err_process(soc,
								 mac_for_pdev,
								 remaining_quota);
				if (work_done)
					intr_stats->num_rxdma2host_ring_masks++;
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			/* Low-threshold refill of the Rx buffer ring */
			if (int_ctx->host2rxdma_ring_mask &
			    (1 << mac_for_pdev)) {
				union dp_rx_desc_list_elem_t *desc_list = NULL;
				union dp_rx_desc_list_elem_t *tail = NULL;
				struct dp_srng *rx_refill_buf_ring =
					&pdev->rx_refill_buf_ring;

				intr_stats->num_host2rxdma_ring_masks++;
				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
					     1);
				dp_rx_buffers_replenish(soc, mac_for_pdev,
							rx_refill_buf_ring,
							&soc->rx_desc_buf[mac_for_pdev],
							0, &desc_list, &tail);
			}
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	return dp_budget - budget;
}
1536
1537/* dp_interrupt_timer()- timer poll for interrupts
1538 *
1539 * @arg: SoC Handle
1540 *
1541 * Return:
1542 *
1543 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001544static void dp_interrupt_timer(void *arg)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301545{
1546 struct dp_soc *soc = (struct dp_soc *) arg;
1547 int i;
1548
Ravi Joshi86e98262017-03-01 13:47:03 -08001549 if (qdf_atomic_read(&soc->cmn_init_done)) {
1550 for (i = 0;
1551 i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1552 dp_service_srngs(&soc->intr_ctx[i], 0xffff);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301553
Ravi Joshi86e98262017-03-01 13:47:03 -08001554 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1555 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301556}
1557
1558/*
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07001559 * dp_soc_attach_poll() - Register handlers for DP interrupts
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301560 * @txrx_soc: DP SOC handle
1561 *
1562 * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1563 * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1564 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1565 *
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07001566 * Return: 0 for success, nonzero for failure.
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301567 */
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301568static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301569{
1570 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1571 int i;
1572
psimhac983d7e2017-07-26 15:20:07 -07001573 soc->intr_mode = DP_INTR_POLL;
1574
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301575 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
Houston Hoffman648a9182017-05-21 23:27:50 -07001576 soc->intr_ctx[i].dp_intr_id = i;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07001577 soc->intr_ctx[i].tx_ring_mask =
1578 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1579 soc->intr_ctx[i].rx_ring_mask =
1580 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1581 soc->intr_ctx[i].rx_mon_ring_mask =
1582 wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1583 soc->intr_ctx[i].rx_err_ring_mask =
1584 wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1585 soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1586 wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1587 soc->intr_ctx[i].reo_status_ring_mask =
1588 wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1589 soc->intr_ctx[i].rxdma2host_ring_mask =
1590 wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301591 soc->intr_ctx[i].soc = soc;
Dhanashri Atre0da31222017-03-23 12:30:58 -07001592 soc->intr_ctx[i].lro_ctx = qdf_lro_init();
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301593 }
1594
1595 qdf_timer_init(soc->osdev, &soc->int_timer,
1596 dp_interrupt_timer, (void *)soc,
1597 QDF_TIMER_TYPE_WAKE_APPS);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301598
1599 return QDF_STATUS_SUCCESS;
1600}
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301601
/* Forward declaration: definition follows the interrupt-map helpers below */
static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
#if defined(CONFIG_MCL)
/*
 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Call the appropriate attach function based on the mode of operation.
 * This is a WAR for enabling monitor mode.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	/* Monitor mode (or NAPI disabled) cannot use real interrupts here,
	 * so fall back to timer-driven polling.
	 */
	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	    con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: Poll mode", __func__);
		return dp_soc_attach_poll(txrx_soc);
	} else {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: Interrupt mode", __func__);
		return dp_soc_interrupt_attach(txrx_soc);
	}
}
#else
#if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
/* Build forces poll mode unconditionally when DP_INTR_POLL_BASED is set */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	return dp_soc_attach_poll(txrx_soc);
}
#else
/* Default non-MCL path: let HIF decide between polled and interrupt mode */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (hif_is_polled_mode_enabled(soc->hif_handle))
		return dp_soc_attach_poll(txrx_soc);
	else
		return dp_soc_interrupt_attach(txrx_soc);
}
#endif
#endif
Houston Hoffman648a9182017-05-21 23:27:50 -07001647
/*
 * dp_soc_interrupt_map_calculate_integrated() - build the legacy (integrated
 *	chip) IRQ id map for one interrupt context
 * @soc: DP SOC handle
 * @intr_ctx_num: interrupt context index used to look up the wlan_cfg masks
 * @irq_id_map: output array filled with the IRQ ids this context services
 * @num_irq_r: output count of entries written to @irq_id_map
 *
 * Each bit j set in a ring mask selects ring instance j; the HW IRQ id is
 * derived by offsetting from the corresponding ring-1 id (hence the
 * "base - j" arithmetic), with per-MAC rings additionally translated
 * through wlan_cfg_get_hw_mac_idx().
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		/* Monitor mode contributes two IRQs per MAC: ppdu-end and
		 * the rxdma2host monitor status ring.
		 */
		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 -
				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
		}

		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

	}
	*num_irq_r = num_irq;
}
1724
/*
 * dp_soc_interrupt_map_calculate_msi() - build the MSI IRQ id map for one
 *	interrupt context
 * @soc: DP SOC handle
 * @intr_ctx_num: interrupt context index used to look up the wlan_cfg masks
 * @irq_id_map: output array; at most one MSI IRQ id is written
 * @num_irq_r: output count of entries written to @irq_id_map
 * @msi_vector_count: number of MSI vectors assigned to DP
 * @msi_vector_start: first MSI vector number assigned to DP
 *
 * Side effect: switches soc->intr_mode to DP_INTR_MSI.
 */
static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
		int msi_vector_count, int msi_vector_start)
{
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	/* Contexts share the available DP vectors round-robin */
	unsigned int vector =
		(intr_ctx_num % msi_vector_count) + msi_vector_start;
	int num_irq = 0;

	soc->intr_mode = DP_INTR_MSI;

	/* A context servicing any of these rings gets exactly one MSI vector.
	 * NOTE(review): host2rxdma / host2rxdma_mon masks are not part of
	 * this eligibility OR — confirm whether a context with only
	 * host2rxdma rings is intended to go without a vector.
	 */
	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
		irq_id_map[num_irq++] =
			pld_get_msi_irq(soc->osdev->dev, vector);

	*num_irq_r = num_irq;
}
1757
1758static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1759 int *irq_id_map, int *num_irq)
1760{
1761 int msi_vector_count, ret;
1762 uint32_t msi_base_data, msi_vector_start;
1763
1764 ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1765 &msi_vector_count,
1766 &msi_base_data,
1767 &msi_vector_start);
1768 if (ret)
1769 return dp_soc_interrupt_map_calculate_integrated(soc,
1770 intr_ctx_num, irq_id_map, num_irq);
1771
1772 else
1773 dp_soc_interrupt_map_calculate_msi(soc,
1774 intr_ctx_num, irq_id_map, num_irq,
1775 msi_vector_count, msi_vector_start);
1776}
1777
/*
 * dp_soc_interrupt_attach() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	int i = 0;
	int num_irq = 0;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		int ret = 0;

		/* Map of IRQ ids registered with one interrupt context */
		int irq_id_map[HIF_MAX_GRP_IRQ];

		int tx_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		/* Monitor mask depends on the interrupt mode, not just cfg */
		int rx_mon_mask =
			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
		int rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		int reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		int rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		int host2rxdma_ring_mask =
			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
		int host2rxdma_mon_ring_mask =
			wlan_cfg_get_host2rxdma_mon_ring_mask(
				soc->wlan_cfg_ctx, i);

		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask = tx_mask;
		soc->intr_ctx[i].rx_ring_mask = rx_mask;
		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
			host2rxdma_mon_ring_mask;

		soc->intr_ctx[i].soc = soc;

		num_irq = 0;

		/* Translate the ring masks into the HW/MSI IRQ ids that
		 * this context must be registered for.
		 */
		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
					       &num_irq);

		/* Register this context as a NAPI-style exec group with HIF;
		 * dp_service_srngs() is the group handler.
		 */
		ret = hif_register_ext_group(soc->hif_handle,
			num_irq, irq_id_map, dp_service_srngs,
			&soc->intr_ctx[i], "dp_intr",
			HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);

		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("failed, ret = %d"), ret);

			return QDF_STATUS_E_FAILURE;
		}
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
	}

	/* Enable all registered exec-group interrupts in one shot */
	hif_configure_ext_group_interrupts(soc->hif_handle);

	return QDF_STATUS_SUCCESS;
}
1858
1859/*
1860 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1861 * @txrx_soc: DP SOC handle
1862 *
1863 * Return: void
1864 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001865static void dp_soc_interrupt_detach(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301866{
1867 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
Leo Chang5ea93a42016-11-03 12:39:49 -07001868 int i;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301869
psimhac983d7e2017-07-26 15:20:07 -07001870 if (soc->intr_mode == DP_INTR_POLL) {
1871 qdf_timer_stop(&soc->int_timer);
1872 qdf_timer_free(&soc->int_timer);
psimhaa079b8c2017-08-02 17:27:14 -07001873 } else {
1874 hif_deregister_exec_group(soc->hif_handle, "dp_intr");
psimhac983d7e2017-07-26 15:20:07 -07001875 }
1876
Leo Chang5ea93a42016-11-03 12:39:49 -07001877 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1878 soc->intr_ctx[i].tx_ring_mask = 0;
1879 soc->intr_ctx[i].rx_ring_mask = 0;
1880 soc->intr_ctx[i].rx_mon_ring_mask = 0;
Houston Hoffman648a9182017-05-21 23:27:50 -07001881 soc->intr_ctx[i].rx_err_ring_mask = 0;
1882 soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1883 soc->intr_ctx[i].reo_status_ring_mask = 0;
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001884 soc->intr_ctx[i].rxdma2host_ring_mask = 0;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001885 soc->intr_ctx[i].host2rxdma_ring_mask = 0;
Keyur Parekh11865212018-10-12 18:03:12 -07001886 soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
Houston Hoffman648a9182017-05-21 23:27:50 -07001887
Dhanashri Atre0da31222017-03-23 12:30:58 -07001888 qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
Leo Chang5ea93a42016-11-03 12:39:49 -07001889 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301890}
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301891
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001892#define AVG_MAX_MPDUS_PER_TID 128
1893#define AVG_TIDS_PER_CLIENT 2
1894#define AVG_FLOWS_PER_TID 2
1895#define AVG_MSDUS_PER_FLOW 128
1896#define AVG_MSDUS_PER_MPDU 4
1897
1898/*
1899 * Allocate and setup link descriptor pool that will be used by HW for
1900 * various link and queue descriptors and managed by WBM
1901 */
1902static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1903{
1904 int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1905 int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1906 uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1907 uint32_t num_mpdus_per_link_desc =
1908 hal_num_mpdus_per_link_desc(soc->hal_soc);
1909 uint32_t num_msdus_per_link_desc =
1910 hal_num_msdus_per_link_desc(soc->hal_soc);
1911 uint32_t num_mpdu_links_per_queue_desc =
1912 hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1913 uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1914 uint32_t total_link_descs, total_mem_size;
1915 uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1916 uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1917 uint32_t num_link_desc_banks;
1918 uint32_t last_bank_size = 0;
1919 uint32_t entry_size, num_entries;
1920 int i;
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001921 uint32_t desc_id = 0;
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05301922 qdf_dma_addr_t *baseaddr = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001923
1924 /* Only Tx queue descriptors are allocated from common link descriptor
1925 * pool Rx queue descriptors are not included in this because (REO queue
1926 * extension descriptors) they are expected to be allocated contiguously
1927 * with REO queue descriptors
1928 */
1929 num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1930 AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1931
1932 num_mpdu_queue_descs = num_mpdu_link_descs /
1933 num_mpdu_links_per_queue_desc;
1934
1935 num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1936 AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1937 num_msdus_per_link_desc;
1938
1939 num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1940 AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1941
1942 num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1943 num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1944
1945 /* Round up to power of 2 */
1946 total_link_descs = 1;
1947 while (total_link_descs < num_entries)
1948 total_link_descs <<= 1;
1949
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301950 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1951 FL("total_link_descs: %u, link_desc_size: %d"),
1952 total_link_descs, link_desc_size);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001953 total_mem_size = total_link_descs * link_desc_size;
1954
1955 total_mem_size += link_desc_align;
1956
1957 if (total_mem_size <= max_alloc_size) {
1958 num_link_desc_banks = 0;
1959 last_bank_size = total_mem_size;
1960 } else {
1961 num_link_desc_banks = (total_mem_size) /
1962 (max_alloc_size - link_desc_align);
1963 last_bank_size = total_mem_size %
1964 (max_alloc_size - link_desc_align);
1965 }
1966
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301967 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1968 FL("total_mem_size: %d, num_link_desc_banks: %u"),
1969 total_mem_size, num_link_desc_banks);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001970
1971 for (i = 0; i < num_link_desc_banks; i++) {
phadimana1f79822019-02-15 15:02:37 +05301972 if (!dp_is_soc_reinit(soc)) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05301973 baseaddr = &soc->link_desc_banks[i].
1974 base_paddr_unaligned;
1975 soc->link_desc_banks[i].base_vaddr_unaligned =
1976 qdf_mem_alloc_consistent(soc->osdev,
1977 soc->osdev->dev,
1978 max_alloc_size,
1979 baseaddr);
1980 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001981 soc->link_desc_banks[i].size = max_alloc_size;
1982
1983 soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1984 soc->link_desc_banks[i].base_vaddr_unaligned) +
1985 ((unsigned long)(
1986 soc->link_desc_banks[i].base_vaddr_unaligned) %
1987 link_desc_align));
1988
1989 soc->link_desc_banks[i].base_paddr = (unsigned long)(
1990 soc->link_desc_banks[i].base_paddr_unaligned) +
1991 ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1992 (unsigned long)(
1993 soc->link_desc_banks[i].base_vaddr_unaligned));
1994
1995 if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301996 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1997 FL("Link descriptor memory alloc failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001998 goto fail;
1999 }
Karunakar Dasineni80756372019-05-02 23:49:31 -07002000 qdf_minidump_log((void *)(soc->link_desc_banks[i].base_vaddr),
2001 soc->link_desc_banks[i].size, "link_desc_bank");
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002002 }
2003
2004 if (last_bank_size) {
2005 /* Allocate last bank in case total memory required is not exact
2006 * multiple of max_alloc_size
2007 */
phadimana1f79822019-02-15 15:02:37 +05302008 if (!dp_is_soc_reinit(soc)) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05302009 baseaddr = &soc->link_desc_banks[i].
2010 base_paddr_unaligned;
2011 soc->link_desc_banks[i].base_vaddr_unaligned =
2012 qdf_mem_alloc_consistent(soc->osdev,
2013 soc->osdev->dev,
2014 last_bank_size,
2015 baseaddr);
2016 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002017 soc->link_desc_banks[i].size = last_bank_size;
2018
2019 soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
2020 (soc->link_desc_banks[i].base_vaddr_unaligned) +
2021 ((unsigned long)(
2022 soc->link_desc_banks[i].base_vaddr_unaligned) %
2023 link_desc_align));
2024
2025 soc->link_desc_banks[i].base_paddr =
2026 (unsigned long)(
2027 soc->link_desc_banks[i].base_paddr_unaligned) +
2028 ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
2029 (unsigned long)(
2030 soc->link_desc_banks[i].base_vaddr_unaligned));
Karunakar Dasineni80756372019-05-02 23:49:31 -07002031
2032 qdf_minidump_log((void *)(soc->link_desc_banks[i].base_vaddr),
2033 soc->link_desc_banks[i].size, "link_desc_bank");
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002034 }
2035
2036
2037 /* Allocate and setup link descriptor idle list for HW internal use */
2038 entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
2039 total_mem_size = entry_size * total_link_descs;
2040
2041 if (total_mem_size <= max_alloc_size) {
2042 void *desc;
2043
2044 if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05302045 WBM_IDLE_LINK, 0, 0, total_link_descs, 0)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302046 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2047 FL("Link desc idle ring setup failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002048 goto fail;
2049 }
2050
Karunakar Dasineni80756372019-05-02 23:49:31 -07002051 qdf_minidump_log(
2052 (void *)(soc->wbm_idle_link_ring.base_vaddr_unaligned),
2053 soc->wbm_idle_link_ring.alloc_size,
2054 "wbm_idle_link_ring");
2055
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002056 hal_srng_access_start_unlocked(soc->hal_soc,
2057 soc->wbm_idle_link_ring.hal_srng);
2058
2059 for (i = 0; i < MAX_LINK_DESC_BANKS &&
2060 soc->link_desc_banks[i].base_paddr; i++) {
2061 uint32_t num_entries = (soc->link_desc_banks[i].size -
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07002062 ((unsigned long)(
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002063 soc->link_desc_banks[i].base_vaddr) -
2064 (unsigned long)(
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07002065 soc->link_desc_banks[i].base_vaddr_unaligned)))
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002066 / link_desc_size;
2067 unsigned long paddr = (unsigned long)(
2068 soc->link_desc_banks[i].base_paddr);
2069
2070 while (num_entries && (desc = hal_srng_src_get_next(
2071 soc->hal_soc,
2072 soc->wbm_idle_link_ring.hal_srng))) {
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07002073 hal_set_link_desc_addr(desc,
2074 LINK_DESC_COOKIE(desc_id, i), paddr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002075 num_entries--;
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07002076 desc_id++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002077 paddr += link_desc_size;
2078 }
2079 }
2080 hal_srng_access_end_unlocked(soc->hal_soc,
2081 soc->wbm_idle_link_ring.hal_srng);
2082 } else {
2083 uint32_t num_scatter_bufs;
2084 uint32_t num_entries_per_buf;
2085 uint32_t rem_entries;
2086 uint8_t *scatter_buf_ptr;
2087 uint16_t scatter_buf_num;
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05302088 uint32_t buf_size = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002089
2090 soc->wbm_idle_scatter_buf_size =
2091 hal_idle_list_scatter_buf_size(soc->hal_soc);
2092 num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2093 soc->hal_soc, soc->wbm_idle_scatter_buf_size);
Pramod Simhaccb15fb2017-06-19 12:21:13 -07002094 num_scatter_bufs = hal_idle_list_num_scatter_bufs(
2095 soc->hal_soc, total_mem_size,
2096 soc->wbm_idle_scatter_buf_size);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002097
Shaakir Mohamed41323bb2018-03-20 15:57:15 -07002098 if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2099 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2100 FL("scatter bufs size out of bounds"));
2101 goto fail;
2102 }
2103
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002104 for (i = 0; i < num_scatter_bufs; i++) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05302105 baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
phadimana1f79822019-02-15 15:02:37 +05302106 if (!dp_is_soc_reinit(soc)) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05302107 buf_size = soc->wbm_idle_scatter_buf_size;
2108 soc->wbm_idle_scatter_buf_base_vaddr[i] =
2109 qdf_mem_alloc_consistent(soc->osdev,
2110 soc->osdev->
2111 dev,
2112 buf_size,
2113 baseaddr);
2114 }
Jeff Johnsona8edf332019-03-18 09:51:52 -07002115 if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302116 QDF_TRACE(QDF_MODULE_ID_DP,
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05302117 QDF_TRACE_LEVEL_ERROR,
2118 FL("Scatter lst memory alloc fail"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002119 goto fail;
2120 }
2121 }
2122
2123 /* Populate idle list scatter buffers with link descriptor
2124 * pointers
2125 */
2126 scatter_buf_num = 0;
2127 scatter_buf_ptr = (uint8_t *)(
2128 soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2129 rem_entries = num_entries_per_buf;
2130
2131 for (i = 0; i < MAX_LINK_DESC_BANKS &&
2132 soc->link_desc_banks[i].base_paddr; i++) {
2133 uint32_t num_link_descs =
2134 (soc->link_desc_banks[i].size -
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07002135 ((unsigned long)(
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002136 soc->link_desc_banks[i].base_vaddr) -
2137 (unsigned long)(
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07002138 soc->link_desc_banks[i].base_vaddr_unaligned)))
2139 / link_desc_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002140 unsigned long paddr = (unsigned long)(
2141 soc->link_desc_banks[i].base_paddr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002142
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07002143 while (num_link_descs) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002144 hal_set_link_desc_addr((void *)scatter_buf_ptr,
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07002145 LINK_DESC_COOKIE(desc_id, i), paddr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002146 num_link_descs--;
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07002147 desc_id++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002148 paddr += link_desc_size;
Pramod Simhaccb15fb2017-06-19 12:21:13 -07002149 rem_entries--;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002150 if (rem_entries) {
Pramod Simhaccb15fb2017-06-19 12:21:13 -07002151 scatter_buf_ptr += entry_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002152 } else {
2153 rem_entries = num_entries_per_buf;
2154 scatter_buf_num++;
Pramod Simhaccb15fb2017-06-19 12:21:13 -07002155
2156 if (scatter_buf_num >= num_scatter_bufs)
2157 break;
2158
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002159 scatter_buf_ptr = (uint8_t *)(
2160 soc->wbm_idle_scatter_buf_base_vaddr[
2161 scatter_buf_num]);
2162 }
2163 }
2164 }
2165 /* Setup link descriptor idle list in HW */
2166 hal_setup_link_idle_list(soc->hal_soc,
2167 soc->wbm_idle_scatter_buf_base_paddr,
2168 soc->wbm_idle_scatter_buf_base_vaddr,
2169 num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
Leo Chang5ea93a42016-11-03 12:39:49 -07002170 (uint32_t)(scatter_buf_ptr -
Pramod Simhaccb15fb2017-06-19 12:21:13 -07002171 (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2172 scatter_buf_num-1])), total_link_descs);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002173 }
2174 return 0;
2175
2176fail:
2177 if (soc->wbm_idle_link_ring.hal_srng) {
Balamurugan Mahalingam3715aa42018-08-22 02:13:14 +05302178 dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2179 WBM_IDLE_LINK, 0);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002180 }
2181
2182 for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2183 if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07002184 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002185 soc->wbm_idle_scatter_buf_size,
2186 soc->wbm_idle_scatter_buf_base_vaddr[i],
2187 soc->wbm_idle_scatter_buf_base_paddr[i], 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08002188 soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002189 }
2190 }
2191
2192 for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2193 if (soc->link_desc_banks[i].base_vaddr_unaligned) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07002194 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002195 soc->link_desc_banks[i].size,
2196 soc->link_desc_banks[i].base_vaddr_unaligned,
2197 soc->link_desc_banks[i].base_paddr_unaligned,
2198 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08002199 soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002200 }
2201 }
2202 return QDF_STATUS_E_FAILURE;
2203}
2204
2205/*
2206 * Free link descriptor pool that was setup HW
2207 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08002208static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002209{
2210 int i;
2211
2212 if (soc->wbm_idle_link_ring.hal_srng) {
Manoj Ekbote525bcab2017-09-01 17:23:32 -07002213 dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002214 WBM_IDLE_LINK, 0);
2215 }
2216
2217 for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2218 if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07002219 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002220 soc->wbm_idle_scatter_buf_size,
2221 soc->wbm_idle_scatter_buf_base_vaddr[i],
2222 soc->wbm_idle_scatter_buf_base_paddr[i], 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08002223 soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002224 }
2225 }
2226
2227 for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2228 if (soc->link_desc_banks[i].base_vaddr_unaligned) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07002229 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002230 soc->link_desc_banks[i].size,
2231 soc->link_desc_banks[i].base_vaddr_unaligned,
2232 soc->link_desc_banks[i].base_paddr_unaligned,
2233 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08002234 soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002235 }
2236 }
2237}
2238
/*
 * Per-target REO destination ring depths.
 *
 * With IPA offload enabled one REO2SW ring is given to IPA, so the
 * host-owned destination rings are sized to 1023 entries; without IPA
 * the full-size values (1024 / 2048) are used. The QCA8074 emulation
 * platform (QCA_WIFI_QCA8074_VP) uses a tiny 8-entry ring in both
 * builds.
 */
#ifdef IPA_OFFLOAD
#define REO_DST_RING_SIZE_QCA6290 1023
#ifndef QCA_WIFI_QCA8074_VP
#define REO_DST_RING_SIZE_QCA8074 1023
#else
#define REO_DST_RING_SIZE_QCA8074 8
#endif /* QCA_WIFI_QCA8074_VP */

#else

#define REO_DST_RING_SIZE_QCA6290 1024
#ifndef QCA_WIFI_QCA8074_VP
#define REO_DST_RING_SIZE_QCA8074 2048
#else
#define REO_DST_RING_SIZE_QCA8074 8
#endif /* QCA_WIFI_QCA8074_VP */
#endif /* IPA_OFFLOAD */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002256
#ifndef FEATURE_WDS
/* No-op stubs: WDS (wireless distribution system) support compiled out */
static void dp_soc_wds_attach(struct dp_soc *soc)
{
}

static void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302266/*
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302267 * dp_soc_reset_ring_map() - Reset cpu ring map
2268 * @soc: Datapath soc handler
2269 *
2270 * This api resets the default cpu ring map
2271 */
2272
2273static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2274{
2275 uint8_t i;
2276 int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2277
2278 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302279 switch (nss_config) {
2280 case dp_nss_cfg_first_radio:
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302281 /*
2282 * Setting Tx ring map for one nss offloaded radio
2283 */
2284 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302285 break;
2286
2287 case dp_nss_cfg_second_radio:
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302288 /*
2289 * Setting Tx ring for two nss offloaded radios
2290 */
2291 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302292 break;
2293
2294 case dp_nss_cfg_dbdc:
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302295 /*
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302296 * Setting Tx ring map for 2 nss offloaded radios
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302297 */
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302298 soc->tx_ring_map[i] =
2299 dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2300 break;
2301
2302 case dp_nss_cfg_dbtc:
2303 /*
2304 * Setting Tx ring map for 3 nss offloaded radios
2305 */
2306 soc->tx_ring_map[i] =
2307 dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2308 break;
2309
2310 default:
2311 dp_err("tx_ring_map failed due to invalid nss cfg");
2312 break;
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302313 }
2314 }
2315}
2316
Aniruddha Paule3a03342017-09-19 16:42:10 +05302317/*
2318 * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2319 * @dp_soc - DP soc handle
2320 * @ring_type - ring type
2321 * @ring_num - ring_num
2322 *
2323 * return 0 or 1
2324 */
2325static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2326{
2327 uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2328 uint8_t status = 0;
2329
2330 switch (ring_type) {
2331 case WBM2SW_RELEASE:
2332 case REO_DST:
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002333 case RXDMA_BUF:
Aniruddha Paule3a03342017-09-19 16:42:10 +05302334 status = ((nss_config) & (1 << ring_num));
2335 break;
2336 default:
2337 break;
2338 }
2339
2340 return status;
2341}
2342
2343/*
2344 * dp_soc_reset_intr_mask() - reset interrupt mask
2345 * @dp_soc - DP Soc handle
2346 *
2347 * Return: Return void
2348 */
2349static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2350{
2351 uint8_t j;
2352 int *grp_mask = NULL;
2353 int group_number, mask, num_ring;
2354
2355 /* number of tx ring */
2356 num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2357
2358 /*
2359 * group mask for tx completion ring.
2360 */
2361 grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2362
2363 /* loop and reset the mask for only offloaded ring */
2364 for (j = 0; j < num_ring; j++) {
2365 if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2366 continue;
2367 }
2368
2369 /*
2370 * Group number corresponding to tx offloaded ring.
2371 */
2372 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2373 if (group_number < 0) {
2374 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002375 FL("ring not part of any group; ring_type: %d,ring_num %d"),
Aniruddha Paule3a03342017-09-19 16:42:10 +05302376 WBM2SW_RELEASE, j);
2377 return;
2378 }
2379
2380 /* reset the tx mask for offloaded ring */
2381 mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2382 mask &= (~(1 << j));
2383
2384 /*
2385 * reset the interrupt mask for offloaded ring.
2386 */
2387 wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2388 }
2389
2390 /* number of rx rings */
2391 num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2392
2393 /*
2394 * group mask for reo destination ring.
2395 */
2396 grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2397
2398 /* loop and reset the mask for only offloaded ring */
2399 for (j = 0; j < num_ring; j++) {
2400 if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2401 continue;
2402 }
2403
2404 /*
2405 * Group number corresponding to rx offloaded ring.
2406 */
2407 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2408 if (group_number < 0) {
2409 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002410 FL("ring not part of any group; ring_type: %d,ring_num %d"),
Aniruddha Paule3a03342017-09-19 16:42:10 +05302411 REO_DST, j);
2412 return;
2413 }
2414
2415 /* set the interrupt mask for offloaded ring */
2416 mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2417 mask &= (~(1 << j));
2418
2419 /*
2420 * set the interrupt mask to zero for rx offloaded radio.
2421 */
2422 wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2423 }
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002424
2425 /*
2426 * group mask for Rx buffer refill ring
2427 */
2428 grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2429
2430 /* loop and reset the mask for only offloaded ring */
2431 for (j = 0; j < MAX_PDEV_CNT; j++) {
2432 if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2433 continue;
2434 }
2435
2436 /*
2437 * Group number corresponding to rx offloaded ring.
2438 */
2439 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2440 if (group_number < 0) {
2441 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2442 FL("ring not part of any group; ring_type: %d,ring_num %d"),
2443 REO_DST, j);
2444 return;
2445 }
2446
2447 /* set the interrupt mask for offloaded ring */
2448 mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2449 group_number);
2450 mask &= (~(1 << j));
2451
2452 /*
2453 * set the interrupt mask to zero for rx offloaded radio.
2454 */
2455 wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2456 group_number, mask);
2457 }
Aniruddha Paule3a03342017-09-19 16:42:10 +05302458}
2459
#ifdef IPA_OFFLOAD
/**
 * dp_reo_remap_config() - configure reo remap register value based
 * nss configuration.
 * based on offload_radio value below remap configuration
 * get applied.
 * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
 * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
 * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
 * 3 - both Radios handled by NSS (remap not required)
 * 4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
 *
 * @soc: DP SOC handle
 * @remap1: output parameter indicates reo remap 1 register value
 * @remap2: output parameter indicates reo remap 2 register value
 * Return: bool type, true if remap is configured else false.
 */
bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
{
	/*
	 * IPA build: per the mapping table above, one REO2SW ring is kept
	 * for IPA, so the eight 3-bit destination fields below cycle over
	 * rings 1-3 only. The << 8 shift positions the fields at the
	 * register offset consumed by hal_reo_setup().
	 */
	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;

	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;

	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);

	return true;
}
#else
static bool dp_reo_remap_config(struct dp_soc *soc,
				uint32_t *remap1,
				uint32_t *remap2)
{
	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	switch (offload_radio) {
	case dp_nss_cfg_default:
		/* Host owns all radios: hash across REO2SW rings 1-4 */
		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
			(0x3 << 18) | (0x4 << 21)) << 8;

		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
			(0x3 << 18) | (0x4 << 21)) << 8;
		break;
	case dp_nss_cfg_first_radio:
		/* Ring 1 belongs to NSS: distribute over rings 2-4 */
		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
			(0x2 << 18) | (0x3 << 21)) << 8;

		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
			(0x4 << 18) | (0x2 << 21)) << 8;
		break;

	case dp_nss_cfg_second_radio:
		/* Ring 2 belongs to NSS: distribute over rings 1, 3 & 4 */
		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
			(0x1 << 18) | (0x3 << 21)) << 8;

		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
			(0x4 << 18) | (0x1 << 21)) << 8;
		break;

	case dp_nss_cfg_dbdc:
	case dp_nss_cfg_dbtc:
		/* return false if both or all are offloaded to NSS */
		return false;
	}

	dp_debug("remap1 %x remap2 %x offload_radio %u",
		 *remap1, *remap2, offload_radio);
	return true;
}
#endif
2536
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302537/*
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302538 * dp_reo_frag_dst_set() - configure reo register to set the
2539 * fragment destination ring
2540 * @soc : Datapath soc
2541 * @frag_dst_ring : output parameter to set fragment destination ring
2542 *
2543 * Based on offload_radio below fragment destination rings is selected
2544 * 0 - TCL
2545 * 1 - SW1
2546 * 2 - SW2
2547 * 3 - SW3
2548 * 4 - SW4
2549 * 5 - Release
2550 * 6 - FW
2551 * 7 - alternate select
2552 *
2553 * return: void
2554 */
2555static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2556{
2557 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2558
2559 switch (offload_radio) {
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302560 case dp_nss_cfg_default:
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302561 *frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2562 break;
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302563 case dp_nss_cfg_dbdc:
2564 case dp_nss_cfg_dbtc:
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302565 *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2566 break;
2567 default:
2568 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2569 FL("dp_reo_frag_dst_set invalid offload radio config"));
2570 break;
2571 }
2572}
2573
#ifdef ENABLE_VERBOSE_DEBUG
/*
 * dp_enable_verbose_debug() - latch per-packet trace flags from soc cfg
 * @soc: Datapath SOC handle
 *
 * Turns on DP verbose logging when its bit is set in per_pkt_trace
 * (sticky: never cleared here) and sets HAL verbose logging on or off
 * according to its own bit.
 */
static void dp_enable_verbose_debug(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	if (cfg->per_pkt_trace & dp_verbose_debug_mask)
		is_dp_verbose_debug_enabled = true;

	hal_set_verbose_debug((cfg->per_pkt_trace & hal_verbose_debug_mask) != 0);
}
#else
static void dp_enable_verbose_debug(struct dp_soc *soc)
{
}
#endif
2594
/*
 * dp_soc_cmn_setup() - Common SoC level initializion
 * @soc: Datapath SOC handle
 *
 * This is an internal function used to setup common SOC data structures,
 * to be called from PDEV attach after receiving HW mode capabilities from FW.
 * Sets up the HW link descriptor pool, all SOC-common SRNG rings (Tx data /
 * completion, TCL cmd/status, REO dest/reinject/cmd/status, Rx release and
 * exception), configures REO hash remap and fragment destination, and
 * finally marks cmn_init_done. Idempotent: returns 0 immediately on a
 * repeated call.
 */
static int dp_soc_cmn_setup(struct dp_soc *soc)
{
	int i;
	struct hal_reo_params reo_params;
	int tx_ring_size;
	int tx_comp_ring_size;
	int reo_dst_ring_size;
	uint32_t entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	/* Already initialized once: nothing to do */
	if (qdf_atomic_read(&soc->cmn_init_done))
		return 0;

	if (dp_hw_link_desc_pool_setup(soc))
		goto fail1;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	dp_enable_verbose_debug(soc);

	/* Setup SRNG rings */
	/* Common rings */
	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);

	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
			  entries, 0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for wbm_desc_rel_ring"));
		goto fail1;
	}

	/* Register the ring with minidump so it shows up in crash dumps */
	qdf_minidump_log(
		(void *)(soc->wbm_desc_rel_ring.base_vaddr_unaligned),
		soc->wbm_desc_rel_ring.alloc_size, "wbm_desc_rel_ring");

	soc->num_tcl_data_rings = 0;
	/* Tx data rings: SOC-level only when Tx rings are not per-pdev */
	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
		soc->num_tcl_data_rings =
			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
		tx_comp_ring_size =
			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
		tx_ring_size =
			wlan_cfg_tx_ring_size(soc_cfg_ctx);
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
					  TCL_DATA, i, 0, tx_ring_size, 0)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
				goto fail1;
			}
			/*
			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
			 * count
			 */
			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
					  WBM2SW_RELEASE, i, 0,
					  tx_comp_ring_size,
					  WLAN_CFG_DST_RING_CACHED_DESC)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
				goto fail1;
			}
		}
	} else {
		/* This will be incremented during per pdev ring setup */
		soc->num_tcl_data_rings = 0;
	}

	if (dp_tx_soc_attach(soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("dp_tx_soc_attach failed"));
		goto fail1;
	}

	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
	/* TCL command and status rings */
	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
			  entries, 0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for tcl_cmd_ring"));
		goto fail1;
	}

	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
			  entries, 0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for tcl_status_ring"));
		goto fail1;
	}

	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);

	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
	 * descriptors
	 */

	/* Rx data rings: SOC-level only when Rx rings are not per-pdev */
	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
		QDF_TRACE(QDF_MODULE_ID_DP,
			QDF_TRACE_LEVEL_INFO,
			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
					  i, 0, reo_dst_ring_size,
					  WLAN_CFG_DST_RING_CACHED_DESC)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
				goto fail1;
			}
		}
	} else {
		/* This will be incremented during per pdev ring setup */
		soc->num_reo_dest_rings = 0;
	}

	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
		/* Only valid for MCL */
		struct dp_pdev *pdev = soc->pdev_list[0];

		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
					  RXDMA_DST, 0, i, entries, 0)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_err_dst_ring"));
				goto fail1;
			}
		}
	}
	/* TBD: call dp_rx_init to setup Rx SW descriptors */

	/* REO reinjection ring */
	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
			  entries, 0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for reo_reinject_ring"));
		goto fail1;
	}


	/* Rx release ring: WBM2SW ring 3 is used for Rx buffer release */
	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
			  wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx),
			  0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for rx_rel_ring"));
		goto fail1;
	}


	/* Rx exception ring */
	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
	if (dp_srng_setup(soc, &soc->reo_exception_ring,
			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries, 0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_srng_setup failed for reo_exception_ring"));
		goto fail1;
	}


	/* REO command and status rings */
	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
			  wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx),
			  0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for reo_cmd_ring"));
		goto fail1;
	}

	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
	TAILQ_INIT(&soc->rx.reo_cmd_list);
	qdf_spinlock_create(&soc->rx.reo_cmd_lock);

	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
			  wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx),
			  0)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for reo_status_ring"));
		goto fail1;
	}


	/* Reset the cpu ring map if radio is NSS offloaded */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
		dp_soc_reset_cpu_ring_map(soc);
		dp_soc_reset_intr_mask(soc);
	}

	/* Setup HW REO */
	qdf_mem_zero(&reo_params, sizeof(reo_params));

	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {

		/*
		 * Reo ring remap is not required if both radios
		 * are offloaded to NSS
		 */
		if (!dp_reo_remap_config(soc,
					&reo_params.remap1,
					&reo_params.remap2))
			goto out;
			/* NOTE(review): this skips the defrag waitlist init
			 * below when remap is not needed (fully NSS
			 * offloaded) - presumably intentional since the host
			 * then owns no Rx rings; confirm before relying on
			 * soc->rx.defrag state in that configuration.
			 */

		reo_params.rx_hash_enabled = true;
	}

	/* setup the global rx defrag waitlist */
	TAILQ_INIT(&soc->rx.defrag.waitlist);
	soc->rx.defrag.timeout_ms =
		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
	soc->rx.defrag.next_flush_ms = 0;
	soc->rx.flags.defrag_timeout_check =
		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);

out:
	/*
	 * set the fragment destination ring
	 */
	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);

	hal_reo_setup(soc->hal_soc, &reo_params);

	qdf_atomic_set(&soc->cmn_init_done, 1);

	dp_soc_wds_attach(soc);

	qdf_nbuf_queue_init(&soc->htt_stats.msg);
	return 0;
fail1:
	/*
	 * Cleanup will be done as part of soc_detach, which will
	 * be called on pdev attach failure
	 */
	return QDF_STATUS_E_FAILURE;
}
2847
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002848static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002849
Mohit Khanna16816ae2018-10-30 14:12:03 -07002850static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
Dhanashri Atre14049172016-11-11 18:32:36 -08002851{
2852 struct cdp_lro_hash_config lro_hash;
Mohit Khanna16816ae2018-10-30 14:12:03 -07002853 QDF_STATUS status;
Dhanashri Atre14049172016-11-11 18:32:36 -08002854
2855 if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
Mohit Khanna16816ae2018-10-30 14:12:03 -07002856 !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
2857 !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2858 dp_err("LRO, GRO and RX hash disabled");
2859 return QDF_STATUS_E_FAILURE;
Dhanashri Atre14049172016-11-11 18:32:36 -08002860 }
2861
2862 qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2863
Mohit Khanna16816ae2018-10-30 14:12:03 -07002864 if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
2865 wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
Dhanashri Atre14049172016-11-11 18:32:36 -08002866 lro_hash.lro_enable = 1;
2867 lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2868 lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
Houston Hoffman41b912c2017-08-30 14:27:51 -07002869 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2870 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
Dhanashri Atre14049172016-11-11 18:32:36 -08002871 }
2872
Houston Hoffman41b912c2017-08-30 14:27:51 -07002873 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
Dhanashri Atre14049172016-11-11 18:32:36 -08002874 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2875 LRO_IPV4_SEED_ARR_SZ));
Dhanashri Atre14049172016-11-11 18:32:36 -08002876 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2877 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2878 LRO_IPV6_SEED_ARR_SZ));
2879
Dhanashri Atre14049172016-11-11 18:32:36 -08002880 qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2881
Mohit Khanna16816ae2018-10-30 14:12:03 -07002882 if (!soc->cdp_soc.ol_ops->lro_hash_config) {
2883 QDF_BUG(0);
2884 dp_err("lro_hash_config not configured");
2885 return QDF_STATUS_E_FAILURE;
2886 }
2887
2888 status = soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev,
2889 &lro_hash);
2890 if (!QDF_IS_STATUS_SUCCESS(status)) {
2891 dp_err("failed to send lro_hash_config to FW %u", status);
2892 return status;
2893 }
2894
2895 dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2896 lro_hash.lro_enable, lro_hash.tcp_flag,
2897 lro_hash.tcp_flag_mask);
2898
2899 dp_info("toeplitz_hash_ipv4:");
2900 qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2901 (void *)lro_hash.toeplitz_hash_ipv4,
2902 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2903 LRO_IPV4_SEED_ARR_SZ));
2904
2905 dp_info("toeplitz_hash_ipv6:");
2906 qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2907 (void *)lro_hash.toeplitz_hash_ipv6,
2908 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2909 LRO_IPV6_SEED_ARR_SZ));
2910
2911 return status;
Dhanashri Atre14049172016-11-11 18:32:36 -08002912}
2913
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002914/*
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002915* dp_rxdma_ring_setup() - configure the RX DMA rings
2916* @soc: data path SoC handle
2917* @pdev: Physical device handle
2918*
2919* Return: 0 - success, > 0 - failure
2920*/
2921#ifdef QCA_HOST2FW_RXBUF_RING
2922static int dp_rxdma_ring_setup(struct dp_soc *soc,
2923 struct dp_pdev *pdev)
2924{
Vivek126db5d2018-07-25 22:05:04 +05302925 struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2926 int max_mac_rings;
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002927 int i;
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05302928 int ring_size;
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002929
Vivek126db5d2018-07-25 22:05:04 +05302930 pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2931 max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05302932 ring_size = wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
Vivek126db5d2018-07-25 22:05:04 +05302933
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002934 for (i = 0; i < max_mac_rings; i++) {
Krunal Sonic96a1162019-02-21 11:33:26 -08002935 dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002936 if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05302937 RXDMA_BUF, 1, i, ring_size, 0)) {
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002938 QDF_TRACE(QDF_MODULE_ID_DP,
2939 QDF_TRACE_LEVEL_ERROR,
2940 FL("failed rx mac ring setup"));
2941 return QDF_STATUS_E_FAILURE;
2942 }
2943 }
2944 return QDF_STATUS_SUCCESS;
2945}
2946#else
2947static int dp_rxdma_ring_setup(struct dp_soc *soc,
2948 struct dp_pdev *pdev)
2949{
2950 return QDF_STATUS_SUCCESS;
2951}
2952#endif
Ishank Jain949674c2017-02-27 17:09:29 +05302953
2954/**
2955 * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2956 * @pdev - DP_PDEV handle
2957 *
2958 * Return: void
2959 */
2960static inline void
2961dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2962{
2963 uint8_t map_id;
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05302964 struct dp_soc *soc = pdev->soc;
2965
2966 if (!soc)
2967 return;
2968
Ishank Jain949674c2017-02-27 17:09:29 +05302969 for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05302970 qdf_mem_copy(pdev->dscp_tid_map[map_id],
2971 default_dscp_tid_map,
2972 sizeof(default_dscp_tid_map));
Ishank Jain949674c2017-02-27 17:09:29 +05302973 }
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05302974
2975 for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
2976 hal_tx_set_dscp_tid_map(soc->hal_soc,
2977 default_dscp_tid_map,
2978 map_id);
Ishank Jain949674c2017-02-27 17:09:29 +05302979 }
2980}
2981
Debasis Dasc39a68d2019-01-28 17:02:06 +05302982/**
2983 * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
2984 * @pdev - DP_PDEV handle
2985 *
2986 * Return: void
2987 */
2988static inline void
2989dp_pcp_tid_map_setup(struct dp_pdev *pdev)
2990{
2991 struct dp_soc *soc = pdev->soc;
2992
2993 if (!soc)
2994 return;
2995
2996 qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
2997 sizeof(default_pcp_tid_map));
2998 hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
2999}
3000
Yun Park47e6af82018-01-17 12:15:01 -08003001#ifdef IPA_OFFLOAD
3002/**
3003 * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
3004 * @soc: data path instance
3005 * @pdev: core txrx pdev context
3006 *
3007 * Return: QDF_STATUS_SUCCESS: success
3008 * QDF_STATUS_E_RESOURCES: Error return
3009 */
3010static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3011 struct dp_pdev *pdev)
3012{
Vivek126db5d2018-07-25 22:05:04 +05303013 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3014 int entries;
3015
3016 soc_cfg_ctx = soc->wlan_cfg_ctx;
3017 entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
3018
Yun Park47e6af82018-01-17 12:15:01 -08003019 /* Setup second Rx refill buffer ring */
3020 if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303021 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id, entries, 0)
3022 ) {
Yun Park47e6af82018-01-17 12:15:01 -08003023 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3024 FL("dp_srng_setup failed second rx refill ring"));
3025 return QDF_STATUS_E_FAILURE;
3026 }
3027 return QDF_STATUS_SUCCESS;
3028}
3029
3030/**
3031 * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
3032 * @soc: data path instance
3033 * @pdev: core txrx pdev context
3034 *
3035 * Return: void
3036 */
3037static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3038 struct dp_pdev *pdev)
3039{
3040 dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3041 IPA_RX_REFILL_BUF_RING_IDX);
3042}
3043
3044#else
Yun Park47e6af82018-01-17 12:15:01 -08003045static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3046 struct dp_pdev *pdev)
3047{
3048 return QDF_STATUS_SUCCESS;
3049}
3050
3051static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3052 struct dp_pdev *pdev)
3053{
3054}
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003055#endif
Yun Park47e6af82018-01-17 12:15:01 -08003056
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003057#if !defined(DISABLE_MON_CONFIG)
3058/**
3059 * dp_mon_rings_setup() - Initialize Monitor rings based on target
3060 * @soc: soc handle
3061 * @pdev: physical device handle
3062 *
3063 * Return: nonzero on failure and zero on success
3064 */
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003065static
3066QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3067{
3068 int mac_id = 0;
3069 int pdev_id = pdev->pdev_id;
Vivek126db5d2018-07-25 22:05:04 +05303070 int entries;
3071 struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3072
3073 pdev_cfg_ctx = pdev->wlan_cfg_ctx;
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003074
3075 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3076 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
3077
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003078 if (soc->wlan_cfg_ctx->rxdma1_enable) {
3079 entries =
3080 wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3081 if (dp_srng_setup(soc,
3082 &pdev->rxdma_mon_buf_ring[mac_id],
3083 RXDMA_MONITOR_BUF, 0, mac_for_pdev,
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303084 entries, 0)) {
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003085 QDF_TRACE(QDF_MODULE_ID_DP,
3086 QDF_TRACE_LEVEL_ERROR,
3087 FL(RNG_ERR "rxdma_mon_buf_ring "));
3088 return QDF_STATUS_E_NOMEM;
3089 }
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003090
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003091 entries =
3092 wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3093 if (dp_srng_setup(soc,
3094 &pdev->rxdma_mon_dst_ring[mac_id],
3095 RXDMA_MONITOR_DST, 0, mac_for_pdev,
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303096 entries, 0)) {
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003097 QDF_TRACE(QDF_MODULE_ID_DP,
3098 QDF_TRACE_LEVEL_ERROR,
3099 FL(RNG_ERR "rxdma_mon_dst_ring"));
3100 return QDF_STATUS_E_NOMEM;
3101 }
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003102
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003103 entries =
3104 wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3105 if (dp_srng_setup(soc,
3106 &pdev->rxdma_mon_status_ring[mac_id],
3107 RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303108 entries, 0)) {
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003109 QDF_TRACE(QDF_MODULE_ID_DP,
3110 QDF_TRACE_LEVEL_ERROR,
3111 FL(RNG_ERR "rxdma_mon_status_ring"));
3112 return QDF_STATUS_E_NOMEM;
3113 }
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003114
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003115 entries =
3116 wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3117 if (dp_srng_setup(soc,
3118 &pdev->rxdma_mon_desc_ring[mac_id],
3119 RXDMA_MONITOR_DESC, 0, mac_for_pdev,
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303120 entries, 0)) {
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003121 QDF_TRACE(QDF_MODULE_ID_DP,
3122 QDF_TRACE_LEVEL_ERROR,
3123 FL(RNG_ERR "rxdma_mon_desc_ring"));
3124 return QDF_STATUS_E_NOMEM;
3125 }
3126 } else {
3127 entries =
3128 wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3129 if (dp_srng_setup(soc,
3130 &pdev->rxdma_mon_status_ring[mac_id],
3131 RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303132 entries, 0)) {
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003133 QDF_TRACE(QDF_MODULE_ID_DP,
3134 QDF_TRACE_LEVEL_ERROR,
3135 FL(RNG_ERR "rxdma_mon_status_ring"));
3136 return QDF_STATUS_E_NOMEM;
3137 }
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003138 }
3139 }
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003140
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003141 return QDF_STATUS_SUCCESS;
3142}
3143#else
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003144static
3145QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003146{
3147 return QDF_STATUS_SUCCESS;
3148}
Yun Park47e6af82018-01-17 12:15:01 -08003149#endif
3150
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05303151/*dp_iterate_update_peer_list - update peer stats on cal client timer
3152 * @pdev_hdl: pdev handle
3153 */
3154#ifdef ATH_SUPPORT_EXT_STAT
3155void dp_iterate_update_peer_list(void *pdev_hdl)
3156{
3157 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
Amir Patelee49ad52018-12-18 13:23:36 +05303158 struct dp_soc *soc = pdev->soc;
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05303159 struct dp_vdev *vdev = NULL;
3160 struct dp_peer *peer = NULL;
3161
Amir Patel594a3d02018-12-27 12:43:45 +05303162 qdf_spin_lock_bh(&soc->peer_ref_mutex);
Amir Patel17b91782019-01-08 12:17:15 +05303163 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05303164 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
3165 DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
3166 dp_cal_client_update_peer_stats(&peer->stats);
3167 }
3168 }
Amir Patelee49ad52018-12-18 13:23:36 +05303169 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Amir Patel17b91782019-01-08 12:17:15 +05303170 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05303171}
3172#else
3173void dp_iterate_update_peer_list(void *pdev_hdl)
3174{
3175}
3176#endif
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05303177
3178/*
3179 * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
3180 * @pdev: Datapath PDEV handle
3181 *
3182 * Return: QDF_STATUS_SUCCESS: Success
3183 * QDF_STATUS_E_NOMEM: Error
3184 */
3185static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
3186{
3187 pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
3188
3189 if (!pdev->ppdu_tlv_buf) {
3190 QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
3191 return QDF_STATUS_E_NOMEM;
3192 }
3193
3194 return QDF_STATUS_SUCCESS;
3195}
3196
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003197/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003198* dp_pdev_attach_wifi3() - attach txrx pdev
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05303199* @ctrl_pdev: Opaque PDEV object
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003200* @txrx_soc: Datapath SOC handle
3201* @htc_handle: HTC handle for host-target interface
3202* @qdf_osdev: QDF OS device
3203* @pdev_id: PDEV ID
3204*
3205* Return: DP PDEV handle on success, NULL on failure
3206*/
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003207static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05303208 struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
Leo Chang5ea93a42016-11-03 12:39:49 -07003209 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003210{
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303211 int ring_size;
Vivek126db5d2018-07-25 22:05:04 +05303212 int entries;
3213 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3214 int nss_cfg;
Amir Patelac7d9462019-03-28 16:16:01 +05303215 void *sojourn_buf;
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05303216
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003217 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303218 struct dp_pdev *pdev = NULL;
3219
phadimana1f79822019-02-15 15:02:37 +05303220 if (dp_is_soc_reinit(soc))
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303221 pdev = soc->pdev_list[pdev_id];
3222 else
3223 pdev = qdf_mem_malloc(sizeof(*pdev));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003224
3225 if (!pdev) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303226 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3227 FL("DP PDEV memory allocation failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003228 goto fail0;
3229 }
Karunakar Dasineni80756372019-05-02 23:49:31 -07003230 qdf_minidump_log((void *)pdev, sizeof(*pdev), "dp_pdev");
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003231
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303232 /*
3233 * Variable to prevent double pdev deinitialization during
3234 * radio detach execution .i.e. in the absence of any vdev.
3235 */
3236 pdev->pdev_deinit = 0;
Ruchi, Agrawal4c5ade62018-09-27 21:52:11 +05303237 pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
3238
3239 if (!pdev->invalid_peer) {
3240 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3241 FL("Invalid peer memory allocation failed"));
3242 qdf_mem_free(pdev);
3243 goto fail0;
3244 }
3245
Vivek126db5d2018-07-25 22:05:04 +05303246 soc_cfg_ctx = soc->wlan_cfg_ctx;
3247 pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05303248
3249 if (!pdev->wlan_cfg_ctx) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303250 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3251 FL("pdev cfg_attach failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303252
Ruchi, Agrawal4c5ade62018-09-27 21:52:11 +05303253 qdf_mem_free(pdev->invalid_peer);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303254 qdf_mem_free(pdev);
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05303255 goto fail0;
3256 }
3257
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05303258 /*
3259 * set nss pdev config based on soc config
3260 */
Vivek126db5d2018-07-25 22:05:04 +05303261 nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05303262 wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
Vivek126db5d2018-07-25 22:05:04 +05303263 (nss_cfg & (1 << pdev_id)));
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05303264
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003265 pdev->soc = soc;
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05303266 pdev->ctrl_pdev = ctrl_pdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003267 pdev->pdev_id = pdev_id;
3268 soc->pdev_list[pdev_id] = pdev;
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303269
3270 pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08003271 soc->pdev_count++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003272
3273 TAILQ_INIT(&pdev->vdev_list);
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05303274 qdf_spinlock_create(&pdev->vdev_list_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003275 pdev->vdev_count = 0;
3276
Pamidipati, Vijay9c9a2872017-05-31 10:06:34 +05303277 qdf_spinlock_create(&pdev->tx_mutex);
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05303278 qdf_spinlock_create(&pdev->neighbour_peer_mutex);
3279 TAILQ_INIT(&pdev->neighbour_peers_list);
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05303280 pdev->neighbour_peers_added = false;
Chaithanya Garrepalli65e6fc12018-12-21 19:17:33 +05303281 pdev->monitor_configured = false;
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05303282
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003283 if (dp_soc_cmn_setup(soc)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303284 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3285 FL("dp_soc_cmn_setup failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303286 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003287 }
3288
3289 /* Setup per PDEV TCL rings if configured */
3290 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303291 ring_size =
Vivek126db5d2018-07-25 22:05:04 +05303292 wlan_cfg_tx_ring_size(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05303293
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003294 if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303295 pdev_id, pdev_id, ring_size, 0)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303296 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3297 FL("dp_srng_setup failed for tcl_data_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303298 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003299 }
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303300
3301 ring_size =
3302 wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3303
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003304 if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303305 WBM2SW_RELEASE, pdev_id, pdev_id,
3306 ring_size, 0)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303307 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3308 FL("dp_srng_setup failed for tx_comp_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303309 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003310 }
3311 soc->num_tcl_data_rings++;
3312 }
3313
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303314 /* Tx specific init */
3315 if (dp_tx_pdev_attach(pdev)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303316 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3317 FL("dp_tx_pdev_attach failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303318 goto fail1;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303319 }
3320
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303321 ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003322 /* Setup per PDEV REO rings if configured */
Vivek126db5d2018-07-25 22:05:04 +05303323 if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003324 if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303325 pdev_id, pdev_id, ring_size, 0)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303326 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3327 FL("dp_srng_setup failed for reo_dest_ringn"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303328 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003329 }
3330 soc->num_reo_dest_rings++;
3331
3332 }
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303333
3334 ring_size =
3335 wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc->wlan_cfg_ctx);
3336
Dhanashri Atre7351d172016-10-12 13:08:09 -07003337 if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303338 ring_size, 0)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303339 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3340 FL("dp_srng_setup failed rx refill ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303341 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003342 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003343
3344 if (dp_rxdma_ring_setup(soc, pdev)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303345 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003346 FL("RXDMA ring config failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303347 goto fail1;
Dhanashri Atre7351d172016-10-12 13:08:09 -07003348 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003349
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003350 if (dp_mon_rings_setup(soc, pdev)) {
3351 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3352 FL("MONITOR rings setup failed"));
3353 goto fail1;
Kai Chen6eca1a62017-01-12 10:17:53 -08003354 }
3355
Vivek126db5d2018-07-25 22:05:04 +05303356 entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08003357 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3358 if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
Chaithanya Garrepalliab234e52019-05-28 12:10:49 +05303359 0, pdev_id, entries, 0)) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08003360 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05303361 FL(RNG_ERR "rxdma_err_dst_ring"));
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08003362 goto fail1;
3363 }
Pramod Simhae382ff82017-06-05 18:09:26 -07003364 }
3365
Yun Park47e6af82018-01-17 12:15:01 -08003366 if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
Yun Park601d0d82017-08-28 21:49:31 -07003367 goto fail1;
Yun Park601d0d82017-08-28 21:49:31 -07003368
Yun Parkfde6b9e2017-06-26 17:13:11 -07003369 if (dp_ipa_ring_resource_setup(soc, pdev))
3370 goto fail1;
3371
3372 if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
Yun Park601d0d82017-08-28 21:49:31 -07003373 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3374 FL("dp_ipa_uc_attach failed"));
Yun Parkfde6b9e2017-06-26 17:13:11 -07003375 goto fail1;
3376 }
3377
Leo Chang5ea93a42016-11-03 12:39:49 -07003378 /* Rx specific init */
3379 if (dp_rx_pdev_attach(pdev)) {
Yun Parkfde6b9e2017-06-26 17:13:11 -07003380 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003381 FL("dp_rx_pdev_attach failed"));
3382 goto fail1;
Leo Chang5ea93a42016-11-03 12:39:49 -07003383 }
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003384
Ishank Jainbc2d91f2017-01-03 18:14:54 +05303385 DP_STATS_INIT(pdev);
Leo Chang5ea93a42016-11-03 12:39:49 -07003386
nobeljd124b742017-10-16 11:59:12 -07003387 /* Monitor filter init */
3388 pdev->mon_filter_mode = MON_FILTER_ALL;
3389 pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3390 pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3391 pdev->fp_data_filter = FILTER_DATA_ALL;
3392 pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3393 pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3394 pdev->mo_data_filter = FILTER_DATA_ALL;
3395
Leo Chang5ea93a42016-11-03 12:39:49 -07003396 dp_local_peer_id_pool_init(pdev);
Sravan Kumar Kairamf1e07662018-06-18 21:36:14 +05303397
Ishank Jain949674c2017-02-27 17:09:29 +05303398 dp_dscp_tid_map_setup(pdev);
Debasis Dasc39a68d2019-01-28 17:02:06 +05303399 dp_pcp_tid_map_setup(pdev);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003400
Kai Chen6eca1a62017-01-12 10:17:53 -08003401 /* Rx monitor mode specific init */
3402 if (dp_rx_pdev_mon_attach(pdev)) {
3403 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003404 "dp_rx_pdev_mon_attach failed");
Keyur Parekhfad6d082017-05-07 08:54:47 -07003405 goto fail1;
3406 }
3407
3408 if (dp_wdi_event_attach(pdev)) {
3409 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05303410 "dp_wdi_evet_attach failed");
Keyur Parekhfad6d082017-05-07 08:54:47 -07003411 goto fail1;
Kai Chen6eca1a62017-01-12 10:17:53 -08003412 }
3413
Om Prakash Tripathia7fb93f2017-06-27 18:41:41 +05303414 /* set the reo destination during initialization */
3415 pdev->reo_dest = pdev->pdev_id + 1;
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05303416
Anish Natarajb9e7d012018-02-16 00:38:10 +05303417 /*
3418 * initialize ppdu tlv list
3419 */
3420 TAILQ_INIT(&pdev->ppdu_info_list);
3421 pdev->tlv_count = 0;
3422 pdev->list_depth = 0;
3423
Ruchi, Agrawal2cbca3b2018-06-20 19:31:03 +05303424 qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3425
3426 pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3427 sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3428 TRUE);
3429
Amir Patelac7d9462019-03-28 16:16:01 +05303430 if (pdev->sojourn_buf) {
3431 sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
3432 qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
3433 }
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05303434 /* initlialize cal client timer */
3435 dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
3436 &dp_iterate_update_peer_list);
Amir Patel1ea85d42019-01-09 15:19:10 +05303437 qdf_event_create(&pdev->fw_peer_stats_event);
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05303438
Shashikala Prabhu550e69c2019-03-13 17:41:17 +05303439 pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05303440
3441 if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
3442 goto fail1;
3443
nobeljdebe2b32019-04-23 11:18:47 -07003444 dp_tx_ppdu_stats_attach(pdev);
Shashikala Prabhu550e69c2019-03-13 17:41:17 +05303445
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003446 return (struct cdp_pdev *)pdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003447
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303448fail1:
Varun Reddy Yeturu23fbb872019-05-02 22:37:55 -07003449 if (pdev->invalid_peer)
3450 qdf_mem_free(pdev->invalid_peer);
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303451 dp_pdev_detach((struct cdp_pdev *)pdev, 0);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05303452
3453fail0:
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003454 return NULL;
3455}
3456
3457/*
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003458* dp_rxdma_ring_cleanup() - configure the RX DMA rings
3459* @soc: data path SoC handle
3460* @pdev: Physical device handle
3461*
3462* Return: void
3463*/
3464#ifdef QCA_HOST2FW_RXBUF_RING
3465static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3466 struct dp_pdev *pdev)
3467{
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003468 int i;
3469
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003470 for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3471 dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3472 RXDMA_BUF, 1);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003473
3474 qdf_timer_free(&soc->mon_reap_timer);
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003475}
3476#else
3477static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3478 struct dp_pdev *pdev)
3479{
3480}
3481#endif
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05303482
3483/*
3484 * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3485 * @pdev: device object
3486 *
3487 * Return: void
3488 */
3489static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3490{
3491 struct dp_neighbour_peer *peer = NULL;
3492 struct dp_neighbour_peer *temp_peer = NULL;
3493
3494 TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3495 neighbour_peer_list_elem, temp_peer) {
3496 /* delete this peer from the list */
3497 TAILQ_REMOVE(&pdev->neighbour_peers_list,
3498 peer, neighbour_peer_list_elem);
3499 qdf_mem_free(peer);
3500 }
3501
3502 qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3503}
3504
Anish Natarajcf526b72018-03-26 15:55:30 +05303505/**
3506* dp_htt_ppdu_stats_detach() - detach stats resources
3507* @pdev: Datapath PDEV handle
3508*
3509* Return: void
3510*/
3511static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3512{
3513 struct ppdu_info *ppdu_info, *ppdu_info_next;
3514
3515 TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3516 ppdu_info_list_elem, ppdu_info_next) {
3517 if (!ppdu_info)
3518 break;
3519 qdf_assert_always(ppdu_info->nbuf);
3520 qdf_nbuf_free(ppdu_info->nbuf);
3521 qdf_mem_free(ppdu_info);
3522 }
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05303523
3524 if (pdev->ppdu_tlv_buf)
3525 qdf_mem_free(pdev->ppdu_tlv_buf);
3526
Anish Natarajcf526b72018-03-26 15:55:30 +05303527}
3528
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003529#if !defined(DISABLE_MON_CONFIG)
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303530
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003531static
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303532void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3533 int mac_id)
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003534{
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003535 if (soc->wlan_cfg_ctx->rxdma1_enable) {
3536 dp_srng_cleanup(soc,
3537 &pdev->rxdma_mon_buf_ring[mac_id],
3538 RXDMA_MONITOR_BUF, 0);
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003539
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003540 dp_srng_cleanup(soc,
3541 &pdev->rxdma_mon_dst_ring[mac_id],
3542 RXDMA_MONITOR_DST, 0);
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003543
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07003544 dp_srng_cleanup(soc,
3545 &pdev->rxdma_mon_status_ring[mac_id],
3546 RXDMA_MONITOR_STATUS, 0);
3547
3548 dp_srng_cleanup(soc,
3549 &pdev->rxdma_mon_desc_ring[mac_id],
3550 RXDMA_MONITOR_DESC, 0);
3551
3552 dp_srng_cleanup(soc,
3553 &pdev->rxdma_err_dst_ring[mac_id],
3554 RXDMA_DST, 0);
3555 } else {
3556 dp_srng_cleanup(soc,
3557 &pdev->rxdma_mon_status_ring[mac_id],
3558 RXDMA_MONITOR_STATUS, 0);
3559
3560 dp_srng_cleanup(soc,
3561 &pdev->rxdma_err_dst_ring[mac_id],
3562 RXDMA_DST, 0);
3563 }
3564
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003565}
3566#else
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303567static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3568 int mac_id)
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003569{
3570}
3571#endif
3572
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303573/**
3574 * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
3575 *
3576 * @soc: soc handle
3577 * @pdev: datapath physical dev handle
3578 * @mac_id: mac number
3579 *
3580 * Return: None
3581 */
3582static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3583 int mac_id)
3584{
3585}
3586
3587/**
3588 * dp_pdev_mem_reset() - Reset txrx pdev memory
3589 * @pdev: dp pdev handle
3590 *
3591 * Return: None
3592 */
3593static void dp_pdev_mem_reset(struct dp_pdev *pdev)
3594{
3595 uint16_t len = 0;
3596 uint8_t *dp_pdev_offset = (uint8_t *)pdev;
3597
3598 len = sizeof(struct dp_pdev) -
3599 offsetof(struct dp_pdev, pdev_deinit) -
3600 sizeof(pdev->pdev_deinit);
3601 dp_pdev_offset = dp_pdev_offset +
3602 offsetof(struct dp_pdev, pdev_deinit) +
3603 sizeof(pdev->pdev_deinit);
3604
3605 qdf_mem_zero(dp_pdev_offset, len);
3606}
3607
3608/**
3609 * dp_pdev_deinit() - Deinit txrx pdev
3610 * @txrx_pdev: Datapath PDEV handle
3611 * @force: Force deinit
3612 *
3613 * Return: None
3614 */
3615static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003616{
3617 struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3618 struct dp_soc *soc = pdev->soc;
Tallapragada Kalyan94034632017-12-07 17:29:13 +05303619 qdf_nbuf_t curr_nbuf, next_nbuf;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08003620 int mac_id;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003621
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303622 /*
3623 * Prevent double pdev deinitialization during radio detach
3624 * execution .i.e. in the absence of any vdev
3625 */
3626 if (pdev->pdev_deinit)
3627 return;
3628
3629 pdev->pdev_deinit = 1;
3630
Keyur Parekhfad6d082017-05-07 08:54:47 -07003631 dp_wdi_event_detach(pdev);
3632
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05303633 dp_tx_pdev_detach(pdev);
3634
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003635 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303636 dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
3637 TCL_DATA, pdev->pdev_id);
3638 dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
3639 WBM2SW_RELEASE, pdev->pdev_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003640 }
3641
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003642 dp_pktlogmod_exit(pdev);
3643
Leo Chang5ea93a42016-11-03 12:39:49 -07003644 dp_rx_pdev_detach(pdev);
Kai Chen6eca1a62017-01-12 10:17:53 -08003645 dp_rx_pdev_mon_detach(pdev);
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05303646 dp_neighbour_peers_detach(pdev);
Pamidipati, Vijay9c9a2872017-05-31 10:06:34 +05303647 qdf_spinlock_destroy(&pdev->tx_mutex);
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05303648 qdf_spinlock_destroy(&pdev->vdev_list_lock);
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05303649
Yun Parkfde6b9e2017-06-26 17:13:11 -07003650 dp_ipa_uc_detach(soc, pdev);
3651
Yun Park47e6af82018-01-17 12:15:01 -08003652 dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
Yun Park601d0d82017-08-28 21:49:31 -07003653
Yun Parkfde6b9e2017-06-26 17:13:11 -07003654 /* Cleanup per PDEV REO rings if configured */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003655 if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303656 dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
3657 REO_DST, pdev->pdev_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003658 }
3659
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303660 dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003661
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003662 dp_rxdma_ring_cleanup(soc, pdev);
3663
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08003664 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003665 dp_mon_ring_deinit(soc, pdev, mac_id);
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303666 dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
3667 RXDMA_DST, 0);
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08003668 }
Pramod Simhae382ff82017-06-05 18:09:26 -07003669
Tallapragada Kalyan94034632017-12-07 17:29:13 +05303670 curr_nbuf = pdev->invalid_peer_head_msdu;
3671 while (curr_nbuf) {
3672 next_nbuf = qdf_nbuf_next(curr_nbuf);
3673 qdf_nbuf_free(curr_nbuf);
3674 curr_nbuf = next_nbuf;
3675 }
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303676 pdev->invalid_peer_head_msdu = NULL;
3677 pdev->invalid_peer_tail_msdu = NULL;
Tallapragada Kalyan94034632017-12-07 17:29:13 +05303678
Anish Natarajcf526b72018-03-26 15:55:30 +05303679 dp_htt_ppdu_stats_detach(pdev);
3680
Ruchi, Agrawal2cbca3b2018-06-20 19:31:03 +05303681 qdf_nbuf_free(pdev->sojourn_buf);
3682
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05303683 dp_cal_client_detach(&pdev->cal_client_ctx);
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303684
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08003685 soc->pdev_count--;
Manikandan Mohanb01696b2017-05-09 18:03:19 -07003686 wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
Varun Reddy Yeturu23fbb872019-05-02 22:37:55 -07003687 if (pdev->invalid_peer)
3688 qdf_mem_free(pdev->invalid_peer);
Santosh Anbu2280e862018-01-03 22:25:53 +05303689 qdf_mem_free(pdev->dp_txrx_handle);
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303690 dp_pdev_mem_reset(pdev);
3691}
3692
3693/**
3694 * dp_pdev_deinit_wifi3() - Deinit txrx pdev
3695 * @txrx_pdev: Datapath PDEV handle
3696 * @force: Force deinit
3697 *
3698 * Return: None
3699 */
3700static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
3701{
phadiman449a2682019-02-20 14:00:00 +05303702 struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3703 struct dp_soc *soc = pdev->soc;
3704
3705 soc->dp_soc_reinit = TRUE;
3706
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303707 dp_pdev_deinit(txrx_pdev, force);
3708}
3709
3710/*
3711 * dp_pdev_detach() - Complete rest of pdev detach
3712 * @txrx_pdev: Datapath PDEV handle
3713 * @force: Force deinit
3714 *
3715 * Return: None
3716 */
3717static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
3718{
3719 struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3720 struct dp_soc *soc = pdev->soc;
phadiman7dd261d2019-03-15 01:48:50 +05303721 struct rx_desc_pool *rx_desc_pool;
3722 int mac_id, mac_for_pdev;
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303723
3724 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3725 dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3726 TCL_DATA, pdev->pdev_id);
3727 dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3728 WBM2SW_RELEASE, pdev->pdev_id);
3729 }
3730
3731 dp_mon_link_free(pdev);
3732
nobeljdebe2b32019-04-23 11:18:47 -07003733 dp_tx_ppdu_stats_detach(pdev);
3734
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303735 /* Cleanup per PDEV REO rings if configured */
3736 if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3737 dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
3738 REO_DST, pdev->pdev_id);
3739 }
Varun Reddy Yeturu23fbb872019-05-02 22:37:55 -07003740 dp_rxdma_ring_cleanup(soc, pdev);
3741 wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303742
3743 dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
Varun Reddy Yeturu23fbb872019-05-02 22:37:55 -07003744 dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303745
3746 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3747 dp_mon_ring_cleanup(soc, pdev, mac_id);
3748 dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
3749 RXDMA_DST, 0);
phadiman7dd261d2019-03-15 01:48:50 +05303750 if (dp_is_soc_reinit(soc)) {
3751 mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
3752 pdev->pdev_id);
3753 rx_desc_pool = &soc->rx_desc_status[mac_for_pdev];
Varun Reddy Yeturua7c21dc2019-05-16 14:03:46 -07003754 dp_rx_desc_pool_free(soc, rx_desc_pool);
phadiman7dd261d2019-03-15 01:48:50 +05303755 rx_desc_pool = &soc->rx_desc_mon[mac_for_pdev];
Varun Reddy Yeturua7c21dc2019-05-16 14:03:46 -07003756 dp_rx_desc_pool_free(soc, rx_desc_pool);
phadiman7dd261d2019-03-15 01:48:50 +05303757 }
3758 }
3759
3760 if (dp_is_soc_reinit(soc)) {
3761 rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id];
Varun Reddy Yeturua7c21dc2019-05-16 14:03:46 -07003762 dp_rx_desc_pool_free(soc, rx_desc_pool);
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303763 }
3764
3765 soc->pdev_list[pdev->pdev_id] = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003766 qdf_mem_free(pdev);
3767}
3768
3769/*
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303770 * dp_pdev_detach_wifi3() - detach txrx pdev
3771 * @txrx_pdev: Datapath PDEV handle
3772 * @force: Force detach
3773 *
3774 * Return: None
3775 */
3776static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3777{
3778 struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3779 struct dp_soc *soc = pdev->soc;
3780
phadimana1f79822019-02-15 15:02:37 +05303781 if (dp_is_soc_reinit(soc)) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303782 dp_pdev_detach(txrx_pdev, force);
3783 } else {
3784 dp_pdev_deinit(txrx_pdev, force);
3785 dp_pdev_detach(txrx_pdev, force);
3786 }
3787}
3788
3789/*
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08003790 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3791 * @soc: DP SOC handle
3792 */
3793static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3794{
3795 struct reo_desc_list_node *desc;
3796 struct dp_rx_tid *rx_tid;
3797
3798 qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
3799 while (qdf_list_remove_front(&soc->reo_desc_freelist,
3800 (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
3801 rx_tid = &desc->rx_tid;
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08003802 qdf_mem_unmap_nbytes_single(soc->osdev,
Pramod Simha6b23f752017-03-30 11:54:18 -07003803 rx_tid->hw_qdesc_paddr,
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08003804 QDF_DMA_BIDIRECTIONAL,
3805 rx_tid->hw_qdesc_alloc_size);
3806 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08003807 qdf_mem_free(desc);
3808 }
3809 qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
3810 qdf_list_destroy(&soc->reo_desc_freelist);
3811 qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
3812}
3813
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303814/**
3815 * dp_soc_mem_reset() - Reset Dp Soc memory
3816 * @soc: DP handle
3817 *
3818 * Return: None
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003819 */
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303820static void dp_soc_mem_reset(struct dp_soc *soc)
3821{
3822 uint16_t len = 0;
3823 uint8_t *dp_soc_offset = (uint8_t *)soc;
3824
3825 len = sizeof(struct dp_soc) -
3826 offsetof(struct dp_soc, dp_soc_reinit) -
3827 sizeof(soc->dp_soc_reinit);
3828 dp_soc_offset = dp_soc_offset +
3829 offsetof(struct dp_soc, dp_soc_reinit) +
3830 sizeof(soc->dp_soc_reinit);
3831
3832 qdf_mem_zero(dp_soc_offset, len);
3833}
3834
3835/**
3836 * dp_soc_deinit() - Deinitialize txrx SOC
3837 * @txrx_soc: Opaque DP SOC handle
3838 *
3839 * Return: None
3840 */
3841static void dp_soc_deinit(void *txrx_soc)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003842{
3843 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003844 int i;
3845
Ravi Joshi86e98262017-03-01 13:47:03 -08003846 qdf_atomic_set(&soc->cmn_init_done, 0);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003847
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303848 for (i = 0; i < MAX_PDEV_CNT; i++) {
3849 if (soc->pdev_list[i])
3850 dp_pdev_deinit((struct cdp_pdev *)
3851 soc->pdev_list[i], 1);
3852 }
3853
Dustin Brownf653d162017-09-19 11:29:41 -07003854 qdf_flush_work(&soc->htt_stats.work);
3855 qdf_disable_work(&soc->htt_stats.work);
Om Prakash Tripathi12126822017-08-03 10:21:24 +05303856
3857 /* Free pending htt stats messages */
3858 qdf_nbuf_queue_free(&soc->htt_stats.msg);
Om Prakash Tripathi2cd7fab2017-07-07 20:27:25 +05303859
Chaithanya Garrepalli291dfa02018-10-12 17:11:34 +05303860 dp_reo_cmdlist_destroy(soc);
3861
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303862 dp_peer_find_detach(soc);
3863
3864 /* Free the ring memories */
3865 /* Common rings */
3866 dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3867
3868 /* Tx data rings */
3869 if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3870 for (i = 0; i < soc->num_tcl_data_rings; i++) {
3871 dp_srng_deinit(soc, &soc->tcl_data_ring[i],
3872 TCL_DATA, i);
3873 dp_srng_deinit(soc, &soc->tx_comp_ring[i],
3874 WBM2SW_RELEASE, i);
3875 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003876 }
3877
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303878 /* TCL command and status rings */
3879 dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3880 dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3881
3882 /* Rx data rings */
3883 if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3884 soc->num_reo_dest_rings =
3885 wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3886 for (i = 0; i < soc->num_reo_dest_rings; i++) {
3887 /* TODO: Get number of rings and ring sizes
3888 * from wlan_cfg
3889 */
3890 dp_srng_deinit(soc, &soc->reo_dest_ring[i],
3891 REO_DST, i);
3892 }
3893 }
3894 /* REO reinjection ring */
3895 dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3896
3897 /* Rx release ring */
3898 dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
3899
3900 /* Rx exception ring */
3901 /* TODO: Better to store ring_type and ring_num in
3902 * dp_srng during setup
3903 */
3904 dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
3905
3906 /* REO command and status rings */
3907 dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
3908 dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
3909
Pamidipati, Vijay8a4c03a2018-12-08 12:52:38 +05303910 dp_soc_wds_detach(soc);
3911
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303912 qdf_spinlock_destroy(&soc->peer_ref_mutex);
3913 qdf_spinlock_destroy(&soc->htt_stats.lock);
3914
Mohit Khanna40f76b52018-11-30 14:10:55 -08003915 htt_soc_htc_dealloc(soc->htt_handle);
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303916
3917 qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3918
3919 dp_reo_cmdlist_destroy(soc);
3920 qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3921 dp_reo_desc_freelist_destroy(soc);
3922
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303923 qdf_spinlock_destroy(&soc->ast_lock);
3924
3925 dp_soc_mem_reset(soc);
3926}
3927
3928/**
3929 * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
3930 * @txrx_soc: Opaque DP SOC handle
3931 *
3932 * Return: None
3933 */
3934static void dp_soc_deinit_wifi3(void *txrx_soc)
3935{
3936 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3937
3938 soc->dp_soc_reinit = 1;
3939 dp_soc_deinit(txrx_soc);
3940}
3941
3942/*
3943 * dp_soc_detach() - Detach rest of txrx SOC
3944 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
3945 *
3946 * Return: None
3947 */
3948static void dp_soc_detach(void *txrx_soc)
3949{
3950 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3951 int i;
3952
3953 qdf_atomic_set(&soc->cmn_init_done, 0);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003954
3955 /* TBD: Call Tx and Rx cleanup functions to free buffers and
3956 * SW descriptors
3957 */
3958
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303959 for (i = 0; i < MAX_PDEV_CNT; i++) {
3960 if (soc->pdev_list[i])
3961 dp_pdev_detach((struct cdp_pdev *)
3962 soc->pdev_list[i], 1);
3963 }
3964
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003965 /* Free the ring memories */
3966 /* Common rings */
3967 dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
3968
Manikandan Mohanb01696b2017-05-09 18:03:19 -07003969 dp_tx_soc_detach(soc);
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303970
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003971 /* Tx data rings */
3972 if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3973 for (i = 0; i < soc->num_tcl_data_rings; i++) {
3974 dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
3975 TCL_DATA, i);
3976 dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
3977 WBM2SW_RELEASE, i);
3978 }
3979 }
3980
3981 /* TCL command and status rings */
3982 dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
3983 dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
3984
3985 /* Rx data rings */
3986 if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3987 soc->num_reo_dest_rings =
3988 wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
3989 for (i = 0; i < soc->num_reo_dest_rings; i++) {
3990 /* TODO: Get number of rings and ring sizes
3991 * from wlan_cfg
3992 */
3993 dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
3994 REO_DST, i);
3995 }
3996 }
3997 /* REO reinjection ring */
3998 dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
3999
4000 /* Rx release ring */
4001 dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
Varun Reddy Yeturu23fbb872019-05-02 22:37:55 -07004002 dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004003
4004 /* Rx exception ring */
4005 /* TODO: Better to store ring_type and ring_num in
4006 * dp_srng during setup
4007 */
4008 dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4009
4010 /* REO command and status rings */
4011 dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4012 dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
Manoj Ekbote525bcab2017-09-01 17:23:32 -07004013 dp_hw_link_desc_pool_cleanup(soc);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08004014
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004015 htt_soc_detach(soc->htt_handle);
Mohit Khanna40f76b52018-11-30 14:10:55 -08004016 soc->dp_soc_reinit = 0;
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08004017
Manikandan Mohanb01696b2017-05-09 18:03:19 -07004018 wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05304019
Venkata Sharath Chandra Manchala65bf2302017-03-09 17:28:56 -08004020 qdf_mem_free(soc);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004021}
4022
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304023/*
4024 * dp_soc_detach_wifi3() - Detach txrx SOC
4025 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4026 *
4027 * Return: None
4028 */
4029static void dp_soc_detach_wifi3(void *txrx_soc)
4030{
4031 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4032
phadimana1f79822019-02-15 15:02:37 +05304033 if (dp_is_soc_reinit(soc)) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304034 dp_soc_detach(txrx_soc);
4035 } else {
4036 dp_soc_deinit(txrx_soc);
4037 dp_soc_detach(txrx_soc);
4038 }
4039
4040}
4041
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004042#if !defined(DISABLE_MON_CONFIG)
4043/**
4044 * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
4045 * @soc: soc handle
4046 * @pdev: physical device handle
4047 * @mac_id: ring number
4048 * @mac_for_pdev: mac_id
4049 *
4050 * Return: non-zero for failure, zero for success
4051 */
4052static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4053 struct dp_pdev *pdev,
4054 int mac_id,
4055 int mac_for_pdev)
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07004056{
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004057 QDF_STATUS status = QDF_STATUS_SUCCESS;
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07004058
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004059 if (soc->wlan_cfg_ctx->rxdma1_enable) {
4060 status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4061 pdev->rxdma_mon_buf_ring[mac_id]
4062 .hal_srng,
4063 RXDMA_MONITOR_BUF);
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07004064
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004065 if (status != QDF_STATUS_SUCCESS) {
4066 dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
4067 return status;
4068 }
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07004069
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004070 status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4071 pdev->rxdma_mon_dst_ring[mac_id]
4072 .hal_srng,
4073 RXDMA_MONITOR_DST);
4074
4075 if (status != QDF_STATUS_SUCCESS) {
4076 dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
4077 return status;
4078 }
4079
4080 status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4081 pdev->rxdma_mon_status_ring[mac_id]
4082 .hal_srng,
4083 RXDMA_MONITOR_STATUS);
4084
4085 if (status != QDF_STATUS_SUCCESS) {
4086 dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4087 return status;
4088 }
4089
4090 status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4091 pdev->rxdma_mon_desc_ring[mac_id]
4092 .hal_srng,
4093 RXDMA_MONITOR_DESC);
4094
4095 if (status != QDF_STATUS_SUCCESS) {
4096 dp_err("Failed to send htt srng message for Rxdma mon desc ring");
4097 return status;
4098 }
4099 } else {
4100 status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4101 pdev->rxdma_mon_status_ring[mac_id]
4102 .hal_srng,
4103 RXDMA_MONITOR_STATUS);
4104
4105 if (status != QDF_STATUS_SUCCESS) {
4106 dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4107 return status;
4108 }
4109 }
4110
4111 return status;
4112
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07004113}
4114#else
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004115static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4116 struct dp_pdev *pdev,
4117 int mac_id,
4118 int mac_for_pdev)
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07004119{
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004120 return QDF_STATUS_SUCCESS;
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07004121}
4122#endif
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004123
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004124/*
Yun Parkfde6b9e2017-06-26 17:13:11 -07004125 * dp_rxdma_ring_config() - configure the RX DMA rings
4126 *
4127 * This function is used to configure the MAC rings.
4128 * On MCL host provides buffers in Host2FW ring
4129 * FW refills (copies) buffers to the ring and updates
4130 * ring_idx in register
4131 *
4132 * @soc: data path SoC handle
4133 *
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004134 * Return: zero on success, non-zero on failure
Yun Parkfde6b9e2017-06-26 17:13:11 -07004135 */
Dhanashri Atred4032ab2017-01-17 15:05:41 -08004136#ifdef QCA_HOST2FW_RXBUF_RING
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004137static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
Dhanashri Atred4032ab2017-01-17 15:05:41 -08004138{
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004139 int i;
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004140 QDF_STATUS status = QDF_STATUS_SUCCESS;
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004141 for (i = 0; i < MAX_PDEV_CNT; i++) {
4142 struct dp_pdev *pdev = soc->pdev_list[i];
Dhanashri Atred4032ab2017-01-17 15:05:41 -08004143
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004144 if (pdev) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004145 int mac_id;
Dhanashri Atre398935e2017-03-31 15:34:28 -07004146 bool dbs_enable = 0;
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004147 int max_mac_rings =
4148 wlan_cfg_get_num_mac_rings
4149 (pdev->wlan_cfg_ctx);
Dhanashri Atred4032ab2017-01-17 15:05:41 -08004150
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004151 htt_srng_setup(soc->htt_handle, 0,
4152 pdev->rx_refill_buf_ring.hal_srng,
4153 RXDMA_BUF);
Dhanashri Atred4032ab2017-01-17 15:05:41 -08004154
Yun Park601d0d82017-08-28 21:49:31 -07004155 if (pdev->rx_refill_buf_ring2.hal_srng)
4156 htt_srng_setup(soc->htt_handle, 0,
4157 pdev->rx_refill_buf_ring2.hal_srng,
4158 RXDMA_BUF);
Yun Parkfde6b9e2017-06-26 17:13:11 -07004159
Dhanashri Atre2c6381d2017-03-30 19:33:52 -07004160 if (soc->cdp_soc.ol_ops->
4161 is_hw_dbs_2x2_capable) {
Dhanashri Atre398935e2017-03-31 15:34:28 -07004162 dbs_enable = soc->cdp_soc.ol_ops->
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05304163 is_hw_dbs_2x2_capable(soc->ctrl_psoc);
Dhanashri Atre398935e2017-03-31 15:34:28 -07004164 }
4165
4166 if (dbs_enable) {
4167 QDF_TRACE(QDF_MODULE_ID_TXRX,
4168 QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05304169 FL("DBS enabled max_mac_rings %d"),
Dhanashri Atre398935e2017-03-31 15:34:28 -07004170 max_mac_rings);
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004171 } else {
Dhanashri Atre398935e2017-03-31 15:34:28 -07004172 max_mac_rings = 1;
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004173 QDF_TRACE(QDF_MODULE_ID_TXRX,
4174 QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05304175 FL("DBS disabled, max_mac_rings %d"),
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004176 max_mac_rings);
4177 }
4178
4179 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05304180 FL("pdev_id %d max_mac_rings %d"),
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004181 pdev->pdev_id, max_mac_rings);
4182
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004183 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4184 int mac_for_pdev = dp_get_mac_id_for_pdev(
4185 mac_id, pdev->pdev_id);
4186
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004187 QDF_TRACE(QDF_MODULE_ID_TXRX,
4188 QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05304189 FL("mac_id %d"), mac_for_pdev);
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004190
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004191 htt_srng_setup(soc->htt_handle, mac_for_pdev,
4192 pdev->rx_mac_buf_ring[mac_id]
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004193 .hal_srng,
4194 RXDMA_BUF);
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004195 htt_srng_setup(soc->htt_handle, mac_for_pdev,
4196 pdev->rxdma_err_dst_ring[mac_id]
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08004197 .hal_srng,
4198 RXDMA_DST);
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004199
4200 /* Configure monitor mode rings */
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004201 status = dp_mon_htt_srng_setup(soc, pdev,
4202 mac_id,
4203 mac_for_pdev);
4204 if (status != QDF_STATUS_SUCCESS) {
4205 dp_err("Failed to send htt monitor messages to target");
4206 return status;
4207 }
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004208
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004209 }
4210 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08004211 }
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07004212
4213 /*
4214 * Timer to reap rxdma status rings.
4215 * Needed until we enable ppdu end interrupts
4216 */
4217 qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4218 dp_service_mon_rings, (void *)soc,
4219 QDF_TIMER_TYPE_WAKE_APPS);
4220 soc->reap_timer_init = 1;
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004221 return status;
Dhanashri Atred4032ab2017-01-17 15:05:41 -08004222}
4223#else
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004224/* This is only for WIN */
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004225static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
Dhanashri Atred4032ab2017-01-17 15:05:41 -08004226{
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004227 int i;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004228 int mac_id;
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004229 QDF_STATUS status = QDF_STATUS_SUCCESS;
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004230
4231 for (i = 0; i < MAX_PDEV_CNT; i++) {
4232 struct dp_pdev *pdev = soc->pdev_list[i];
4233
Jeff Johnsona8edf332019-03-18 09:51:52 -07004234 if (!pdev)
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004235 continue;
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08004236
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004237 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4238 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
4239
4240 htt_srng_setup(soc->htt_handle, mac_for_pdev,
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004241 pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
Pratik Gandhi4cce3e02018-09-05 19:43:11 +05304242#ifndef DISABLE_MON_CONFIG
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004243 htt_srng_setup(soc->htt_handle, mac_for_pdev,
4244 pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4245 RXDMA_MONITOR_BUF);
4246 htt_srng_setup(soc->htt_handle, mac_for_pdev,
4247 pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
4248 RXDMA_MONITOR_DST);
4249 htt_srng_setup(soc->htt_handle, mac_for_pdev,
4250 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
Kai Chen6eca1a62017-01-12 10:17:53 -08004251 RXDMA_MONITOR_STATUS);
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004252 htt_srng_setup(soc->htt_handle, mac_for_pdev,
4253 pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
Kai Chen6eca1a62017-01-12 10:17:53 -08004254 RXDMA_MONITOR_DESC);
Pratik Gandhi4cce3e02018-09-05 19:43:11 +05304255#endif
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004256 htt_srng_setup(soc->htt_handle, mac_for_pdev,
4257 pdev->rxdma_err_dst_ring[mac_id].hal_srng,
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08004258 RXDMA_DST);
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08004259 }
4260 }
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004261 return status;
Dhanashri Atred4032ab2017-01-17 15:05:41 -08004262}
4263#endif
4264
Kiran Venkatappa07921612019-03-02 23:14:12 +05304265#ifdef NO_RX_PKT_HDR_TLV
4266static QDF_STATUS
4267dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4268{
4269 int i;
4270 int mac_id;
4271 struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
4272 QDF_STATUS status = QDF_STATUS_SUCCESS;
4273
4274 htt_tlv_filter.mpdu_start = 1;
4275 htt_tlv_filter.msdu_start = 1;
4276 htt_tlv_filter.mpdu_end = 1;
4277 htt_tlv_filter.msdu_end = 1;
4278 htt_tlv_filter.attention = 1;
4279 htt_tlv_filter.packet = 1;
4280 htt_tlv_filter.packet_header = 0;
4281
4282 htt_tlv_filter.ppdu_start = 0;
4283 htt_tlv_filter.ppdu_end = 0;
4284 htt_tlv_filter.ppdu_end_user_stats = 0;
4285 htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4286 htt_tlv_filter.ppdu_end_status_done = 0;
4287 htt_tlv_filter.enable_fp = 1;
4288 htt_tlv_filter.enable_md = 0;
4289 htt_tlv_filter.enable_md = 0;
4290 htt_tlv_filter.enable_mo = 0;
4291
4292 htt_tlv_filter.fp_mgmt_filter = 0;
4293 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
4294 htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
4295 FILTER_DATA_MCAST |
4296 FILTER_DATA_DATA);
4297 htt_tlv_filter.mo_mgmt_filter = 0;
4298 htt_tlv_filter.mo_ctrl_filter = 0;
4299 htt_tlv_filter.mo_data_filter = 0;
4300 htt_tlv_filter.md_data_filter = 0;
4301
4302 htt_tlv_filter.offset_valid = true;
4303
4304 htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
4305 /*Not subscribing rx_pkt_header*/
4306 htt_tlv_filter.rx_header_offset = 0;
4307 htt_tlv_filter.rx_mpdu_start_offset =
4308 HAL_RX_PKT_TLV_MPDU_START_OFFSET(soc->hal_soc);
4309 htt_tlv_filter.rx_mpdu_end_offset =
4310 HAL_RX_PKT_TLV_MPDU_END_OFFSET(soc->hal_soc);
4311 htt_tlv_filter.rx_msdu_start_offset =
4312 HAL_RX_PKT_TLV_MSDU_START_OFFSET(soc->hal_soc);
4313 htt_tlv_filter.rx_msdu_end_offset =
4314 HAL_RX_PKT_TLV_MSDU_END_OFFSET(soc->hal_soc);
4315 htt_tlv_filter.rx_attn_offset =
4316 HAL_RX_PKT_TLV_ATTN_OFFSET(soc->hal_soc);
4317
4318 for (i = 0; i < MAX_PDEV_CNT; i++) {
4319 struct dp_pdev *pdev = soc->pdev_list[i];
4320
4321 if (!pdev)
4322 continue;
4323
4324 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4325 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4326 pdev->pdev_id);
4327
4328 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4329 pdev->rx_refill_buf_ring.hal_srng,
4330 RXDMA_BUF, RX_BUFFER_SIZE,
4331 &htt_tlv_filter);
4332 }
4333 }
4334 return status;
4335}
4336#else
4337static QDF_STATUS
4338dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4339{
4340 return QDF_STATUS_SUCCESS;
4341}
4342#endif
4343
Dhanashri Atred4032ab2017-01-17 15:05:41 -08004344/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004345 * dp_soc_attach_target_wifi3() - SOC initialization in the target
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004346 * @cdp_soc: Opaque Datapath SOC handle
4347 *
4348 * Return: zero on success, non-zero on failure
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004349 */
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004350static QDF_STATUS
4351dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004352{
Leo Chang5ea93a42016-11-03 12:39:49 -07004353 struct dp_soc *soc = (struct dp_soc *)cdp_soc;
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004354 QDF_STATUS status = QDF_STATUS_SUCCESS;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004355
4356 htt_soc_attach_target(soc->htt_handle);
4357
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004358 status = dp_rxdma_ring_config(soc);
4359 if (status != QDF_STATUS_SUCCESS) {
4360 dp_err("Failed to send htt srng setup messages to target");
4361 return status;
4362 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08004363
Kiran Venkatappa07921612019-03-02 23:14:12 +05304364 status = dp_rxdma_ring_sel_cfg(soc);
4365 if (status != QDF_STATUS_SUCCESS) {
4366 dp_err("Failed to send htt ring config message to target");
4367 return status;
4368 }
4369
Ishank Jainbc2d91f2017-01-03 18:14:54 +05304370 DP_STATS_INIT(soc);
Om Prakash Tripathi2cd7fab2017-07-07 20:27:25 +05304371
4372 /* initialize work queue for stats processing */
Om Prakash Tripathi12126822017-08-03 10:21:24 +05304373 qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
Om Prakash Tripathi2cd7fab2017-07-07 20:27:25 +05304374
Karunakar Dasineni80756372019-05-02 23:49:31 -07004375 qdf_minidump_log((void *)soc, sizeof(*soc), "dp_soc");
4376
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004377 return QDF_STATUS_SUCCESS;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004378}
4379
4380/*
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05304381 * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
4382 * @txrx_soc: Datapath SOC handle
4383 */
4384static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
4385{
4386 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4387 return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
4388}
Krunal Soni03ba0f52019-02-12 11:44:46 -08004389
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05304390/*
4391 * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
4392 * @txrx_soc: Datapath SOC handle
4393 * @nss_cfg: nss config
4394 */
4395static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
4396{
4397 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
Aniruddha Paul9d7dc272018-02-11 19:40:41 +05304398 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
4399
4400 wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
4401
4402 /*
4403 * TODO: masked out based on the per offloaded radio
4404 */
Aniruddha Paulc34164e2018-09-14 14:25:30 +05304405 switch (config) {
4406 case dp_nss_cfg_default:
4407 break;
4408 case dp_nss_cfg_dbdc:
4409 case dp_nss_cfg_dbtc:
Aniruddha Paul9d7dc272018-02-11 19:40:41 +05304410 wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
4411 wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
4412 wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
4413 wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
Aniruddha Paulc34164e2018-09-14 14:25:30 +05304414 break;
4415 default:
4416 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4417 "Invalid offload config %d", config);
Aniruddha Paul9d7dc272018-02-11 19:40:41 +05304418 }
4419
Aditya Sathishded018e2018-07-02 16:25:21 +05304420 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4421 FL("nss-wifi<0> nss config is enabled"));
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05304422}
Debasis Dasc39a68d2019-01-28 17:02:06 +05304423
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05304424/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004425* dp_vdev_attach_wifi3() - attach txrx vdev
4426* @txrx_pdev: Datapath PDEV handle
4427* @vdev_mac_addr: MAC address of the virtual interface
4428* @vdev_id: VDEV Id
4429* @wlan_op_mode: VDEV operating mode
4430*
4431* Return: DP VDEV handle on success, NULL on failure
4432*/
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004433static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004434 uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
4435{
4436 struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4437 struct dp_soc *soc = pdev->soc;
4438 struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
4439
4440 if (!vdev) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05304441 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4442 FL("DP VDEV memory allocation failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004443 goto fail0;
4444 }
4445
4446 vdev->pdev = pdev;
4447 vdev->vdev_id = vdev_id;
4448 vdev->opmode = op_mode;
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05304449 vdev->osdev = soc->osdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004450
4451 vdev->osif_rx = NULL;
Venkateswara Swamy Bandarubfbef4f2016-12-16 19:12:31 +05304452 vdev->osif_rsim_rx_decap = NULL;
Venkateswara Swamy Bandaru3f4e1c42017-07-10 19:47:09 +05304453 vdev->osif_get_key = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004454 vdev->osif_rx_mon = NULL;
Venkateswara Swamy Bandaru97482342017-02-16 12:04:50 +05304455 vdev->osif_tx_free_ext = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004456 vdev->osif_vdev = NULL;
4457
4458 vdev->delete.pending = 0;
4459 vdev->safemode = 0;
4460 vdev->drop_unenc = 1;
ruchi agrawal45f3ac42017-10-25 09:03:28 +05304461 vdev->sec_type = cdp_sec_type_none;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004462#ifdef notyet
4463 vdev->filters_num = 0;
4464#endif
4465
4466 qdf_mem_copy(
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -08004467 &vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004468
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004469 /* TODO: Initialize default HTT meta data that will be used in
4470 * TCL descriptors for packets transmitted from this VDEV
4471 */
4472
4473 TAILQ_INIT(&vdev->peer_list);
4474
chenguod22ed622018-12-03 16:54:56 +08004475 if ((soc->intr_mode == DP_INTR_POLL) &&
4476 wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
4477 if ((pdev->vdev_count == 0) ||
4478 (wlan_op_mode_monitor == vdev->opmode))
4479 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
4480 }
4481
chenguo2a733792018-11-01 16:10:38 +08004482 if (wlan_op_mode_monitor == vdev->opmode) {
4483 pdev->monitor_vdev = vdev;
Anish Nataraj83d08112018-10-17 20:20:55 +05304484 return (struct cdp_vdev *)vdev;
chenguo2a733792018-11-01 16:10:38 +08004485 }
Anish Nataraj83d08112018-10-17 20:20:55 +05304486
4487 vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4488 vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4489 vdev->dscp_tid_map_id = 0;
4490 vdev->mcast_enhancement_en = 0;
4491 vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
Varsha Mishraa331e6e2019-03-11 12:16:14 +05304492 vdev->prev_tx_enq_tstamp = 0;
4493 vdev->prev_rx_deliver_tstamp = 0;
Anish Nataraj83d08112018-10-17 20:20:55 +05304494
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05304495 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004496 /* add this vdev into the pdev's list */
4497 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05304498 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004499 pdev->vdev_count++;
4500
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05304501 dp_tx_vdev_attach(vdev);
4502
Tallapragada Kalyan16395272018-08-28 12:34:21 +05304503 if (pdev->vdev_count == 1)
4504 dp_lro_hash_setup(soc, pdev);
Dhanashri Atreb178eb42017-03-21 12:32:33 -07004505
Mohit Khanna02553142019-04-11 17:49:27 -07004506 dp_info("Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
Ishank Jain1e7401c2017-02-17 15:38:39 +05304507 DP_STATS_INIT(vdev);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004508
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05304509 if (wlan_op_mode_sta == vdev->opmode)
4510 dp_peer_create_wifi3((struct cdp_vdev *)vdev,
Akshay Kosigi78eced82018-05-14 14:53:48 +05304511 vdev->mac_addr.raw,
4512 NULL);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05304513
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004514 return (struct cdp_vdev *)vdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004515
4516fail0:
4517 return NULL;
4518}
4519
4520/**
4521 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
4522 * @vdev: Datapath VDEV handle
4523 * @osif_vdev: OSIF vdev handle
Akshay Kosigidbbaef42018-05-03 23:39:27 +05304524 * @ctrl_vdev: UMAC vdev handle
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004525 * @txrx_ops: Tx and Rx operations
4526 *
4527 * Return: DP VDEV handle on success, NULL on failure
4528 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004529static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
Akshay Kosigidbbaef42018-05-03 23:39:27 +05304530 void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004531 struct ol_txrx_ops *txrx_ops)
4532{
4533 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4534 vdev->osif_vdev = osif_vdev;
Akshay Kosigidbbaef42018-05-03 23:39:27 +05304535 vdev->ctrl_vdev = ctrl_vdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004536 vdev->osif_rx = txrx_ops->rx.rx;
Mohit Khanna7ac554b2018-05-24 11:58:13 -07004537 vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
Venkateswara Swamy Bandarubfbef4f2016-12-16 19:12:31 +05304538 vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
Venkateswara Swamy Bandaru3f4e1c42017-07-10 19:47:09 +05304539 vdev->osif_get_key = txrx_ops->get_key;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004540 vdev->osif_rx_mon = txrx_ops->rx.mon;
Venkateswara Swamy Bandaru97482342017-02-16 12:04:50 +05304541 vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
Sravan Kumar Kairamd55a74c2019-04-03 16:00:57 +05304542 vdev->tx_comp = txrx_ops->tx.tx_comp;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004543#ifdef notyet
4544#if ATH_SUPPORT_WAPI
4545 vdev->osif_check_wai = txrx_ops->rx.wai_check;
4546#endif
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004547#endif
Ishank Jain997955e2017-03-24 18:18:50 +05304548#ifdef UMAC_SUPPORT_PROXY_ARP
4549 vdev->osif_proxy_arp = txrx_ops->proxy_arp;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004550#endif
Ishank Jainc838b132017-02-17 11:08:18 +05304551 vdev->me_convert = txrx_ops->me_convert;
4552
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004553 /* TODO: Enable the following once Tx code is integrated */
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05304554 if (vdev->mesh_vdev)
4555 txrx_ops->tx.tx = dp_tx_send_mesh;
4556 else
4557 txrx_ops->tx.tx = dp_tx_send;
Leo Chang5ea93a42016-11-03 12:39:49 -07004558
Prathyusha Guduribe41d972018-01-19 14:17:14 +05304559 txrx_ops->tx.tx_exception = dp_tx_send_exception;
4560
Houston Hoffman41b912c2017-08-30 14:27:51 -07004561 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05304562 "DP Vdev Register success");
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004563}
4564
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304565/**
4566 * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
4567 * @vdev: Datapath VDEV handle
Pavankumar Nandeshwar753eed32019-01-22 15:40:15 +05304568 * @unmap_only: Flag to indicate "only unmap"
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304569 *
4570 * Return: void
4571 */
Pavankumar Nandeshwar753eed32019-01-22 15:40:15 +05304572static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304573{
Pavankumar Nandeshwar753eed32019-01-22 15:40:15 +05304574 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304575 struct dp_pdev *pdev = vdev->pdev;
4576 struct dp_soc *soc = pdev->soc;
4577 struct dp_peer *peer;
4578 uint16_t *peer_ids;
Chaithanya Garrepallia5ad5822019-03-20 16:56:43 +05304579 struct dp_ast_entry *ase, *tmp_ase;
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304580 uint8_t i = 0, j = 0;
4581
4582 peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
4583 if (!peer_ids) {
4584 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4585 "DP alloc failure - unable to flush peers");
4586 return;
4587 }
4588
4589 qdf_spin_lock_bh(&soc->peer_ref_mutex);
4590 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4591 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4592 if (peer->peer_ids[i] != HTT_INVALID_PEER)
4593 if (j < soc->max_peers)
4594 peer_ids[j++] = peer->peer_ids[i];
4595 }
4596 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4597
Krunal Sonice2009b2018-12-06 16:38:34 -08004598 for (i = 0; i < j ; i++) {
Vinay Adella5dc55512019-02-07 18:02:15 +05304599 if (unmap_only) {
4600 peer = __dp_peer_find_by_id(soc, peer_ids[i]);
Pavankumar Nandeshwar753eed32019-01-22 15:40:15 +05304601
Vinay Adella5dc55512019-02-07 18:02:15 +05304602 if (peer) {
Chaithanya Garrepallia5ad5822019-03-20 16:56:43 +05304603 if (soc->is_peer_map_unmap_v2) {
4604 /* free AST entries of peer before
4605 * release peer reference
4606 */
4607 DP_PEER_ITERATE_ASE_LIST(peer, ase,
4608 tmp_ase) {
4609 dp_rx_peer_unmap_handler
4610 (soc, peer_ids[i],
4611 vdev->vdev_id,
4612 ase->mac_addr.raw,
4613 1);
4614 }
4615 }
Vinay Adella5dc55512019-02-07 18:02:15 +05304616 dp_rx_peer_unmap_handler(soc, peer_ids[i],
4617 vdev->vdev_id,
4618 peer->mac_addr.raw,
4619 0);
4620 }
4621 } else {
4622 peer = dp_peer_find_by_id(soc, peer_ids[i]);
4623
4624 if (peer) {
4625 dp_info("peer: %pM is getting flush",
4626 peer->mac_addr.raw);
4627
Chaithanya Garrepallia5ad5822019-03-20 16:56:43 +05304628 if (soc->is_peer_map_unmap_v2) {
4629 /* free AST entries of peer before
4630 * release peer reference
4631 */
4632 DP_PEER_ITERATE_ASE_LIST(peer, ase,
4633 tmp_ase) {
4634 dp_rx_peer_unmap_handler
4635 (soc, peer_ids[i],
4636 vdev->vdev_id,
4637 ase->mac_addr.raw,
4638 1);
4639 }
4640 }
Pavankumar Nandeshwar753eed32019-01-22 15:40:15 +05304641 dp_peer_delete_wifi3(peer, 0);
Vinay Adella5dc55512019-02-07 18:02:15 +05304642 /*
4643 * we need to call dp_peer_unref_del_find_by_id
4644 * to remove additional ref count incremented
4645 * by dp_peer_find_by_id() call.
4646 *
4647 * Hold the ref count while executing
4648 * dp_peer_delete_wifi3() call.
4649 *
4650 */
4651 dp_peer_unref_del_find_by_id(peer);
4652 dp_rx_peer_unmap_handler(soc, peer_ids[i],
4653 vdev->vdev_id,
4654 peer->mac_addr.raw, 0);
4655 }
Krunal Sonice2009b2018-12-06 16:38:34 -08004656 }
Krunal Sonice2009b2018-12-06 16:38:34 -08004657 }
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304658
4659 qdf_mem_free(peer_ids);
4660
4661 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4662 FL("Flushed peers for vdev object %pK "), vdev);
4663}
4664
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004665/*
4666 * dp_vdev_detach_wifi3() - Detach txrx vdev
4667 * @txrx_vdev: Datapath VDEV handle
4668 * @callback: Callback OL_IF on completion of detach
4669 * @cb_context: Callback context
4670 *
4671 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004672static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004673 ol_txrx_vdev_delete_cb callback, void *cb_context)
4674{
4675 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Mohit Khanna02553142019-04-11 17:49:27 -07004676 struct dp_pdev *pdev;
4677 struct dp_soc *soc;
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05304678 struct dp_neighbour_peer *peer = NULL;
sumedh baikadyda159202018-11-01 17:31:23 -07004679 struct dp_neighbour_peer *temp_peer = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004680
4681 /* preconditions */
Mohit Khanna02553142019-04-11 17:49:27 -07004682 qdf_assert_always(vdev);
4683 pdev = vdev->pdev;
4684 soc = pdev->soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004685
Anish Nataraj83d08112018-10-17 20:20:55 +05304686 if (wlan_op_mode_monitor == vdev->opmode)
4687 goto free_vdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004688
Tallapragada Kalyan9d9cbb62018-02-26 17:39:12 +05304689 if (wlan_op_mode_sta == vdev->opmode)
Chaitanya Kiran Godavarthi70aeda12019-02-01 17:32:48 +05304690 dp_peer_delete_wifi3(vdev->vap_self_peer, 0);
Tallapragada Kalyan9d9cbb62018-02-26 17:39:12 +05304691
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004692 /*
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304693 * If Target is hung, flush all peers before detaching vdev
4694 * this will free all references held due to missing
4695 * unmap commands from Target
4696 */
Pavankumar Nandeshwar753eed32019-01-22 15:40:15 +05304697 if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
4698 dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304699
4700 /*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004701 * Use peer_ref_mutex while accessing peer_list, in case
4702 * a peer is in the process of being removed from the list.
4703 */
4704 qdf_spin_lock_bh(&soc->peer_ref_mutex);
4705 /* check that the vdev has no peers allocated */
4706 if (!TAILQ_EMPTY(&vdev->peer_list)) {
4707 /* debug print - will be removed later */
Mohit Khanna02553142019-04-11 17:49:27 -07004708 dp_warn("not deleting vdev object %pK (%pM) until deletion finishes for all its peers",
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05304709 vdev, vdev->mac_addr.raw);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004710 /* indicate that the vdev needs to be deleted */
4711 vdev->delete.pending = 1;
4712 vdev->delete.callback = callback;
4713 vdev->delete.context = cb_context;
4714 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4715 return;
4716 }
4717 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4718
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05304719 qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
sumedh baikadyda159202018-11-01 17:31:23 -07004720 if (!soc->hw_nac_monitor_support) {
4721 TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4722 neighbour_peer_list_elem) {
4723 QDF_ASSERT(peer->vdev != vdev);
4724 }
4725 } else {
4726 TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
4727 neighbour_peer_list_elem, temp_peer) {
4728 if (peer->vdev == vdev) {
4729 TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
4730 neighbour_peer_list_elem);
4731 qdf_mem_free(peer);
4732 }
4733 }
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05304734 }
4735 qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4736
Krunal Soni7c4565f2018-09-04 19:02:53 -07004737 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05304738 dp_tx_vdev_detach(vdev);
Krunal Soni7c4565f2018-09-04 19:02:53 -07004739 /* remove the vdev from its parent pdev's list */
4740 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05304741 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07004742 FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
Anish Nataraj83d08112018-10-17 20:20:55 +05304743
Krunal Soni7c4565f2018-09-04 19:02:53 -07004744 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Anish Nataraj83d08112018-10-17 20:20:55 +05304745free_vdev:
4746 qdf_mem_free(vdev);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004747
4748 if (callback)
4749 callback(cb_context);
4750}
4751
Amir Patelcb990262019-05-28 15:12:48 +05304752#ifdef FEATURE_AST
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004753/*
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05304754 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
4755 * @soc - datapath soc handle
4756 * @peer - datapath peer handle
4757 *
4758 * Delete the AST entries belonging to a peer
4759 */
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05304760static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
4761 struct dp_peer *peer)
4762{
4763 struct dp_ast_entry *ast_entry, *temp_ast_entry;
4764
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05304765 DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
4766 dp_peer_del_ast(soc, ast_entry);
4767
Sravan Kumar Kairam8e15ed92018-07-05 19:00:13 +05304768 peer->self_ast_entry = NULL;
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05304769}
4770#else
4771static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
4772 struct dp_peer *peer)
4773{
4774}
4775#endif
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304776#if ATH_SUPPORT_WRAP
4777static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
4778 uint8_t *peer_mac_addr)
4779{
4780 struct dp_peer *peer;
4781
4782 peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
4783 0, vdev->vdev_id);
4784 if (!peer)
4785 return NULL;
4786
4787 if (peer->bss_peer)
4788 return peer;
4789
Chaithanya Garrepalli7c8cf122018-09-07 19:23:52 +05304790 dp_peer_unref_delete(peer);
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304791 return NULL;
4792}
4793#else
4794static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
4795 uint8_t *peer_mac_addr)
4796{
4797 struct dp_peer *peer;
4798
4799 peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
4800 0, vdev->vdev_id);
4801 if (!peer)
4802 return NULL;
4803
4804 if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
4805 return peer;
4806
Chaithanya Garrepalli7c8cf122018-09-07 19:23:52 +05304807 dp_peer_unref_delete(peer);
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304808 return NULL;
4809}
4810#endif
4811
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05304812#ifdef FEATURE_AST
phadiman64a7b912018-10-10 16:19:00 +05304813static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
Chaithanya Garrepalli4c952f12019-02-19 22:37:08 +05304814 struct dp_pdev *pdev,
phadiman64a7b912018-10-10 16:19:00 +05304815 uint8_t *peer_mac_addr)
4816{
4817 struct dp_ast_entry *ast_entry;
4818
4819 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepalli4c952f12019-02-19 22:37:08 +05304820 if (soc->ast_override_support)
4821 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
4822 pdev->pdev_id);
4823 else
4824 ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
Kiran Venkatappa74e6d8b2018-11-05 15:02:29 +05304825
Amir Patelcb990262019-05-28 15:12:48 +05304826 if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05304827 dp_peer_del_ast(soc, ast_entry);
4828
4829 qdf_spin_unlock_bh(&soc->ast_lock);
phadiman64a7b912018-10-10 16:19:00 +05304830}
4831#endif
4832
Sravan Kumar Kairamebd627e2018-08-28 23:32:52 +05304833#ifdef PEER_CACHE_RX_PKTS
4834static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
4835{
4836 qdf_spinlock_create(&peer->bufq_info.bufq_lock);
4837 peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
4838 qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
4839}
4840#else
4841static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
4842{
4843}
4844#endif
4845
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05304846/*
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08004847 * dp_peer_create_wifi3() - attach txrx peer
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004848 * @txrx_vdev: Datapath VDEV handle
4849 * @peer_mac_addr: Peer MAC address
4850 *
4851 * Return: DP peeer handle on success, NULL on failure
4852 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08004853static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
Akshay Kosigi78eced82018-05-14 14:53:48 +05304854 uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004855{
4856 struct dp_peer *peer;
4857 int i;
4858 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4859 struct dp_pdev *pdev;
4860 struct dp_soc *soc;
Amir Patel468bded2019-03-21 11:42:31 +05304861 struct cdp_peer_cookie peer_cookie;
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304862 enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004863
4864 /* preconditions */
4865 qdf_assert(vdev);
4866 qdf_assert(peer_mac_addr);
4867
4868 pdev = vdev->pdev;
4869 soc = pdev->soc;
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05304870
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304871 /*
4872 * If a peer entry with given MAC address already exists,
4873 * reuse the peer and reset the state of peer.
4874 */
4875 peer = dp_peer_can_reuse(vdev, peer_mac_addr);
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05304876
4877 if (peer) {
Tallapragada Kalyan8c93d5d2018-05-28 05:02:53 +05304878 qdf_atomic_init(&peer->is_default_route_set);
4879 dp_peer_cleanup(vdev, peer);
4880
Chaithanya Garrepalli8fb48772019-01-21 23:11:18 +05304881 qdf_spin_lock_bh(&soc->ast_lock);
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05304882 dp_peer_delete_ast_entries(soc, peer);
Chaithanya Garrepalli8fb48772019-01-21 23:11:18 +05304883 peer->delete_in_progress = false;
4884 qdf_spin_unlock_bh(&soc->ast_lock);
Tallapragada Kalyan1f49bff2018-04-12 19:21:21 +05304885
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304886 if ((vdev->opmode == wlan_op_mode_sta) &&
4887 !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -08004888 QDF_MAC_ADDR_SIZE)) {
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304889 ast_type = CDP_TXRX_AST_TYPE_SELF;
4890 }
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304891 dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
Ruchi, Agrawal44461ef2018-04-05 15:25:09 +05304892 /*
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304893 * Control path maintains a node count which is incremented
4894 * for every new peer create command. Since new peer is not being
4895 * created and earlier reference is reused here,
4896 * peer_unref_delete event is sent to control path to
4897 * increment the count back.
4898 */
Ruchi, Agrawal44461ef2018-04-05 15:25:09 +05304899 if (soc->cdp_soc.ol_ops->peer_unref_delete) {
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05304900 soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
Vinay Adella94201152018-12-03 19:02:58 +05304901 peer->mac_addr.raw, vdev->mac_addr.raw,
Pavankumar Nandeshwar2702aee2018-12-20 18:57:12 +05304902 vdev->opmode, peer->ctrl_peer, ctrl_peer);
Ruchi, Agrawal44461ef2018-04-05 15:25:09 +05304903 }
Akshay Kosigi78eced82018-05-14 14:53:48 +05304904 peer->ctrl_peer = ctrl_peer;
Ruchi, Agrawal44461ef2018-04-05 15:25:09 +05304905
Sravan Kumar Kairamda542172018-06-08 12:51:21 +05304906 dp_local_peer_id_alloc(pdev, peer);
Tallapragada Kalyan1f49bff2018-04-12 19:21:21 +05304907 DP_STATS_INIT(peer);
Surya Prakash07c81e72019-04-29 10:08:01 +05304908 DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304909
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05304910 return (void *)peer;
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05304911 } else {
4912 /*
4913 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
4914 * need to remove the AST entry which was earlier added as a WDS
4915 * entry.
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304916 * If an AST entry exists, but no peer entry exists with a given
4917 * MAC addresses, we could deduce it as a WDS entry
Tallapragada Kalyan46f90ce2018-05-15 15:03:06 +05304918 */
Chaithanya Garrepalli4c952f12019-02-19 22:37:08 +05304919 dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
Chaithanya Garrepalli0323f802018-03-14 17:45:21 +05304920 }
4921
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004922#ifdef notyet
4923 peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
4924 soc->mempool_ol_ath_peer);
4925#else
4926 peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
4927#endif
4928
4929 if (!peer)
4930 return NULL; /* failure */
4931
Tallapragada57d86602017-03-31 07:53:58 +05304932 qdf_mem_zero(peer, sizeof(struct dp_peer));
4933
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05304934 TAILQ_INIT(&peer->ast_entry_list);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05304935
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +05304936 /* store provided params */
4937 peer->vdev = vdev;
Akshay Kosigi78eced82018-05-14 14:53:48 +05304938 peer->ctrl_peer = ctrl_peer;
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +05304939
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304940 if ((vdev->opmode == wlan_op_mode_sta) &&
4941 !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -08004942 QDF_MAC_ADDR_SIZE)) {
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304943 ast_type = CDP_TXRX_AST_TYPE_SELF;
4944 }
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05304945 dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
Leo Chang5ea93a42016-11-03 12:39:49 -07004946 qdf_spinlock_create(&peer->peer_info_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004947
Sravan Kumar Kairamebd627e2018-08-28 23:32:52 +05304948 dp_peer_rx_bufq_resources_init(peer);
4949
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004950 qdf_mem_copy(
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -08004951 &peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004952
4953 /* TODO: See of rx_opt_proc is really required */
4954 peer->rx_opt_proc = soc->rx_opt_proc;
4955
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004956 /* initialize the peer_id */
4957 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4958 peer->peer_ids[i] = HTT_INVALID_PEER;
4959
4960 qdf_spin_lock_bh(&soc->peer_ref_mutex);
4961
4962 qdf_atomic_init(&peer->ref_cnt);
4963
4964 /* keep one reference for attach */
4965 qdf_atomic_inc(&peer->ref_cnt);
4966
4967 /* add this peer into the vdev's list */
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05304968 if (wlan_op_mode_sta == vdev->opmode)
4969 TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
4970 else
4971 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
4972
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004973 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4974
4975 /* TODO: See if hash based search is required */
4976 dp_peer_find_hash_add(soc, peer);
4977
Varun Reddy Yeturub9ec57e2017-11-28 11:42:09 -08004978 /* Initialize the peer state */
4979 peer->state = OL_TXRX_PEER_STATE_DISC;
4980
Mohit Khanna02553142019-04-11 17:49:27 -07004981 dp_info("vdev %pK created peer %pK (%pM) ref_cnt: %d",
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08004982 vdev, peer, peer->mac_addr.raw,
4983 qdf_atomic_read(&peer->ref_cnt));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004984 /*
4985 * For every peer MAp message search and set if bss_peer
4986 */
Chaitanya Kiran Godavarthi70aeda12019-02-01 17:32:48 +05304987 if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
4988 QDF_MAC_ADDR_SIZE) == 0 &&
4989 (wlan_op_mode_sta != vdev->opmode)) {
Mohit Khanna02553142019-04-11 17:49:27 -07004990 dp_info("vdev bss_peer!!");
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004991 peer->bss_peer = 1;
4992 vdev->vap_bss_peer = peer;
4993 }
Chaitanya Kiran Godavarthi70aeda12019-02-01 17:32:48 +05304994
4995 if (wlan_op_mode_sta == vdev->opmode &&
4996 qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
4997 QDF_MAC_ADDR_SIZE) == 0) {
4998 vdev->vap_self_peer = peer;
4999 }
5000
Sumedh Baikady1c61e062018-02-12 22:25:47 -08005001 for (i = 0; i < DP_MAX_TIDS; i++)
5002 qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05305003
Sravan Kumar Kairamebd627e2018-08-28 23:32:52 +05305004 peer->valid = 1;
Leo Chang5ea93a42016-11-03 12:39:49 -07005005 dp_local_peer_id_alloc(pdev, peer);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305006 DP_STATS_INIT(peer);
Surya Prakash07c81e72019-04-29 10:08:01 +05305007 DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
Amir Patel468bded2019-03-21 11:42:31 +05305008
5009 qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -08005010 QDF_MAC_ADDR_SIZE);
Amir Patel468bded2019-03-21 11:42:31 +05305011 peer_cookie.ctx = NULL;
5012 peer_cookie.cookie = pdev->next_peer_cookie++;
5013#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
5014 dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
5015 (void *)&peer_cookie,
5016 peer->peer_ids[0], WDI_NO_VAL, pdev->pdev_id);
5017#endif
5018 if (soc->wlanstats_enabled) {
5019 if (!peer_cookie.ctx) {
5020 pdev->next_peer_cookie--;
5021 qdf_err("Failed to initialize peer rate stats");
5022 } else {
5023 peer->wlanstats_ctx = (void *)peer_cookie.ctx;
5024 }
5025 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07005026 return (void *)peer;
5027}
5028
5029/*
Mohit Khanna81179cb2018-08-16 20:50:43 -07005030 * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5031 * @vdev: Datapath VDEV handle
5032 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5033 * @hash_based: pointer to hash value (enabled/disabled) to be populated
5034 *
5035 * Return: None
5036 */
5037static
5038void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5039 enum cdp_host_reo_dest_ring *reo_dest,
5040 bool *hash_based)
5041{
5042 struct dp_soc *soc;
5043 struct dp_pdev *pdev;
5044
5045 pdev = vdev->pdev;
5046 soc = pdev->soc;
5047 /*
5048 * hash based steering is disabled for Radios which are offloaded
5049 * to NSS
5050 */
5051 if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5052 *hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
5053
5054 /*
5055 * Below line of code will ensure the proper reo_dest ring is chosen
5056 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
5057 */
5058 *reo_dest = pdev->reo_dest;
5059}
5060
5061#ifdef IPA_OFFLOAD
5062/*
5063 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5064 * @vdev: Datapath VDEV handle
5065 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5066 * @hash_based: pointer to hash value (enabled/disabled) to be populated
5067 *
5068 * If IPA is enabled in ini, for SAP mode, disable hash based
5069 * steering, use default reo_dst ring for RX. Use config values for other modes.
5070 * Return: None
5071 */
5072static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5073 enum cdp_host_reo_dest_ring *reo_dest,
5074 bool *hash_based)
5075{
5076 struct dp_soc *soc;
5077 struct dp_pdev *pdev;
5078
5079 pdev = vdev->pdev;
5080 soc = pdev->soc;
5081
5082 dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5083
5084 /*
5085 * If IPA is enabled, disable hash-based flow steering and set
5086 * reo_dest_ring_4 as the REO ring to receive packets on.
5087 * IPA is configured to reap reo_dest_ring_4.
5088 *
5089 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
5090 * value enum value is from 1 - 4.
5091 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
5092 */
5093 if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5094 if (vdev->opmode == wlan_op_mode_ap) {
5095 *reo_dest = IPA_REO_DEST_RING_IDX + 1;
5096 *hash_based = 0;
5097 }
5098 }
5099}
5100
5101#else
5102
5103/*
5104 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5105 * @vdev: Datapath VDEV handle
5106 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5107 * @hash_based: pointer to hash value (enabled/disabled) to be populated
5108 *
5109 * Use system config values for hash based steering.
5110 * Return: None
5111 */
5112
5113static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5114 enum cdp_host_reo_dest_ring *reo_dest,
5115 bool *hash_based)
5116{
5117 dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5118}
5119#endif /* IPA_OFFLOAD */
5120
/*
 * dp_peer_setup_wifi3() - initialize the peer
 * @vdev_hdl: virtual device object
 * @peer_hdl: Peer object
 *
 * Chooses the REO destination ring and hash-steering setting for the
 * peer, pushes the default routing to the target (when the control-path
 * callback is registered) and initializes the peer's rx and tx state.
 *
 * Return: void
 */
static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
{
	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	bool hash_based = 0;
	enum cdp_host_reo_dest_ring reo_dest;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer);

	pdev = vdev->pdev;
	soc = pdev->soc;

	/* build-variant helper: IPA builds may override the defaults */
	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);

	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
		pdev->pdev_id, vdev->vdev_id,
		vdev->opmode, hash_based, reo_dest);


	/*
	 * There are corner cases where the AD1 = AD2 = "VAPs address"
	 * i.e both the devices have same MAC address. In these
	 * cases we want such pkts to be processed in NULL Q handler
	 * which is REO2TCL ring. for this reason we should
	 * not setup reo_queues and default route for bss_peer.
	 */
	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
		return;

	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
		/* TODO: Check the destination ring number to be passed to FW */
		soc->cdp_soc.ol_ops->peer_set_default_routing(
			pdev->ctrl_pdev, peer->mac_addr.raw,
			peer->vdev->vdev_id, hash_based, reo_dest);
	}

	/* mark routing done so rx paths can trust the peer's REO setup */
	qdf_atomic_set(&peer->is_default_route_set, 1);

	dp_peer_rx_init(pdev, peer);
	dp_peer_tx_init(pdev, peer);

	return;
}
5175
/*
 * dp_cp_peer_del_resp_handler - Handle the peer delete response
 * @soc_hdl: Datapath SOC handle
 * @vdev_hdl: virtual device object
 * @mac_addr: Mac address of the peer
 *
 * Frees the orphaned AST entry left for @mac_addr once the control plane
 * acknowledges peer deletion.  The entry's free callback (if any) is
 * invoked with CDP_TXRX_AST_DELETED outside the ast_lock.
 *
 * Return: void
 */
static void dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
					struct cdp_vdev *vdev_hdl,
					uint8_t *mac_addr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
	txrx_ast_free_cb cb = NULL;
	void *cookie;

	qdf_spin_lock_bh(&soc->ast_lock);

	/* with AST override the lookup is scoped to the pdev */
	if (soc->ast_override_support)
		ast_entry =
			dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
							vdev->pdev->pdev_id);
	else
		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);

	/* in case of qwrap we have multiple BSS peers
	 * with same mac address
	 *
	 * AST entry for this mac address will be created
	 * only for one peer hence it will be NULL here
	 */
	if (!ast_entry || ast_entry->peer || !ast_entry->delete_in_progress) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return;
	}

	/* drop the hw index mapping before releasing the entry */
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);

	/* detach callback/cookie so the callback can run after unlock */
	cb = ast_entry->callback;
	cookie = ast_entry->cookie;
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	soc->num_ast_entries--;
	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   soc,
		   cookie,
		   CDP_TXRX_AST_DELETED);
	}
	qdf_mem_free(ast_entry);
}
5236
5237/*
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05305238 * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
5239 * @vdev_handle: virtual device object
5240 * @htt_pkt_type: type of pkt
5241 *
5242 * Return: void
5243 */
5244static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
5245 enum htt_cmn_pkt_type val)
5246{
5247 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5248 vdev->tx_encap_type = val;
5249}
5250
5251/*
5252 * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
5253 * @vdev_handle: virtual device object
5254 * @htt_pkt_type: type of pkt
5255 *
5256 * Return: void
5257 */
5258static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
5259 enum htt_cmn_pkt_type val)
5260{
5261 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5262 vdev->rx_decap_type = val;
5263}
5264
5265/*
sumedh baikady1f8f3192018-02-20 17:30:32 -08005266 * dp_set_ba_aging_timeout() - set ba aging timeout per AC
5267 * @txrx_soc: cdp soc handle
5268 * @ac: Access category
5269 * @value: timeout value in millisec
5270 *
5271 * Return: void
5272 */
5273static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5274 uint8_t ac, uint32_t value)
5275{
5276 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5277
5278 hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
5279}
5280
5281/*
5282 * dp_get_ba_aging_timeout() - get ba aging timeout per AC
5283 * @txrx_soc: cdp soc handle
5284 * @ac: access category
5285 * @value: timeout value in millisec
5286 *
5287 * Return: void
5288 */
5289static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5290 uint8_t ac, uint32_t *value)
5291{
5292 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5293
5294 hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
5295}
5296
5297/*
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05305298 * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
5299 * @pdev_handle: physical device object
5300 * @val: reo destination ring index (1 - 4)
5301 *
5302 * Return: void
5303 */
5304static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
5305 enum cdp_host_reo_dest_ring val)
5306{
5307 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5308
5309 if (pdev)
5310 pdev->reo_dest = val;
5311}
5312
5313/*
5314 * dp_get_pdev_reo_dest() - get the reo destination for this pdev
5315 * @pdev_handle: physical device object
5316 *
5317 * Return: reo destination ring index
5318 */
5319static enum cdp_host_reo_dest_ring
5320dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
5321{
5322 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5323
5324 if (pdev)
5325 return pdev->reo_dest;
5326 else
5327 return cdp_host_reo_dest_ring_unknown;
5328}
5329
5330/*
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05305331 * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
5332 * @pdev_handle: device object
5333 * @val: value to be set
5334 *
5335 * Return: void
5336 */
5337static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
5338 uint32_t val)
5339{
5340 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5341
5342 /* Enable/Disable smart mesh filtering. This flag will be checked
5343 * during rx processing to check if packets are from NAC clients.
5344 */
5345 pdev->filter_neighbour_peers = val;
5346 return 0;
5347}
5348
/*
 * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
 * address for smart mesh filtering
 * @vdev_handle: virtual device object
 * @cmd: Add/Del command (DP_NAC_PARAM_ADD / DP_NAC_PARAM_DEL)
 * @macaddr: nac client mac address
 *
 * Return: 1 on success, 0 on failure (NULL mac address, allocation
 *	   failure, or unrecognized command)
 */
static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
					    uint32_t cmd, uint8_t *macaddr)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_neighbour_peer *peer = NULL;

	if (!macaddr)
		goto fail0;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
				sizeof(*peer));

		if (!peer) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("DP neighbour peer node memory allocation failed"));
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			macaddr, QDF_MAC_ADDR_SIZE);
		peer->vdev = vdev;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);

		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
				neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		/* first neighbour: turn on the ppdu/monitor ring config */
		if (!pdev->neighbour_peers_added) {
			pdev->neighbour_peers_added = true;
			dp_ppdu_ring_cfg(pdev);
		}
		return 1;

	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				macaddr, QDF_MAC_ADDR_SIZE)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&pdev->neighbour_peers_list,
					peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}
		/* last neighbour deleted */
		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
			pdev->neighbour_peers_added = false;
			dp_ppdu_ring_cfg(pdev);
		}

		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		/* reset the ppdu ring only when no other feature
		 * (mcopy / enhanced stats) still needs it
		 */
		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
		    !pdev->enhanced_stats_en)
			dp_ppdu_ring_reset(pdev);
		return 1;

	}

fail0:
	return 0;
}
5430
5431/*
Chaitanya Kiran Godavarthi6228e3b2017-06-15 14:28:19 +05305432 * dp_get_sec_type() - Get the security type
5433 * @peer: Datapath peer handle
5434 * @sec_idx: Security id (mcast, ucast)
5435 *
5436 * return sec_type: Security type
5437 */
5438static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
5439{
5440 struct dp_peer *dpeer = (struct dp_peer *)peer;
5441
5442 return dpeer->security[sec_idx].sec_type;
5443}
5444
5445/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07005446 * dp_peer_authorize() - authorize txrx peer
5447 * @peer_handle: Datapath peer handle
5448 * @authorize
5449 *
5450 */
c_cgodavbd5b3c22017-06-07 12:31:40 +05305451static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07005452{
5453 struct dp_peer *peer = (struct dp_peer *)peer_handle;
5454 struct dp_soc *soc;
5455
Jeff Johnsona8edf332019-03-18 09:51:52 -07005456 if (peer) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07005457 soc = peer->vdev->pdev->soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07005458 qdf_spin_lock_bh(&soc->peer_ref_mutex);
5459 peer->authorize = authorize ? 1 : 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07005460 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5461 }
5462}
5463
Krunal Soni7c4565f2018-09-04 19:02:53 -07005464static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
5465 struct dp_pdev *pdev,
5466 struct dp_peer *peer,
Om Prakash Tripathibf529e52019-04-11 17:23:57 +05305467 struct dp_vdev *vdev)
Krunal Soni7c4565f2018-09-04 19:02:53 -07005468{
Krunal Soni7c4565f2018-09-04 19:02:53 -07005469 struct dp_peer *bss_peer = NULL;
5470 uint8_t *m_addr = NULL;
5471
Krunal Soni7c4565f2018-09-04 19:02:53 -07005472 if (!vdev) {
5473 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5474 "vdev is NULL");
5475 } else {
5476 if (vdev->vap_bss_peer == peer)
5477 vdev->vap_bss_peer = NULL;
5478 m_addr = peer->mac_addr.raw;
5479 if (soc->cdp_soc.ol_ops->peer_unref_delete)
Vinay Adella94201152018-12-03 19:02:58 +05305480 soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
Pavankumar Nandeshwar2702aee2018-12-20 18:57:12 +05305481 m_addr, vdev->mac_addr.raw, vdev->opmode,
5482 peer->ctrl_peer, NULL);
Vinay Adella94201152018-12-03 19:02:58 +05305483
Krunal Soni7c4565f2018-09-04 19:02:53 -07005484 if (vdev && vdev->vap_bss_peer) {
5485 bss_peer = vdev->vap_bss_peer;
5486 DP_UPDATE_STATS(vdev, peer);
5487 }
5488 }
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05305489 /*
5490 * Peer AST list hast to be empty here
5491 */
5492 DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
5493
Krunal Soni7c4565f2018-09-04 19:02:53 -07005494 qdf_mem_free(peer);
5495}
5496
/**
 * dp_delete_pending_vdev() - check and process vdev delete
 * @pdev: DP specific pdev pointer
 * @vdev: DP specific vdev pointer
 * @vdev_id: vdev id corresponding to vdev
 *
 * This API does following:
 * 1) It releases tx flow pools buffers as vdev is
 *    going down and no peers are associated.
 * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
 */
static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
				   uint8_t vdev_id)
{
	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
	void *vdev_delete_context = NULL;

	/* capture the delete callback/context before vdev memory is freed */
	vdev_delete_cb = vdev->delete.callback;
	vdev_delete_context = vdev->delete.context;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("deleting vdev object %pK (%pM)- its last peer is done"),
		  vdev, vdev->mac_addr.raw);
	/* all peers are gone, go ahead and delete it */
	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
				      FLOW_TYPE_VDEV, vdev_id);
	dp_tx_vdev_detach(vdev);

	/* unlink the vdev from the pdev list under the list lock */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("deleting vdev object %pK (%pM)"),
		  vdev, vdev->mac_addr.raw);
	qdf_mem_free(vdev);
	vdev = NULL;

	/* notify the registered owner only after everything is released */
	if (vdev_delete_cb)
		vdev_delete_cb(vdev_delete_context);
}
5538
/*
 * dp_peer_unref_delete() - unref and delete peer
 * @peer_handle: Datapath peer handle
 *
 * Drops one reference on the peer.  When the count reaches zero the peer
 * is unlinked from the peer-id map, the peer hash table and its vdev's
 * peer list, upper layers are notified (WDI peer-destroy event), and the
 * peer memory is released.  If this was the last peer of a vdev whose
 * deletion is pending, the vdev is deleted as well.
 */
void dp_peer_unref_delete(void *peer_handle)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_vdev *vdev = peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_peer *tmppeer;
	int found = 0;
	uint16_t peer_id;
	uint16_t vdev_id;
	bool delete_vdev;
	struct cdp_peer_cookie peer_cookie;

	/*
	 * Hold the lock all the way from checking if the peer ref count
	 * is zero until the peer references are removed from the hash
	 * table and vdev list (if the peer ref count is zero).
	 * This protects against a new HL tx operation starting to use the
	 * peer object just after this function concludes it's done being used.
	 * Furthermore, the lock needs to be held while checking whether the
	 * vdev's list of peers is empty, to make sure that list is not modified
	 * concurrently with the empty check.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
		peer_id = peer->peer_ids[0];
		vdev_id = vdev->vdev_id;

		/*
		 * Make sure that the reference to the peer in
		 * peer object map is removed
		 */
		if (peer_id != HTT_INVALID_PEER)
			soc->peer_id_to_obj_map[peer_id] = NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);

		/* remove the reference to the peer from the hash table */
		dp_peer_find_hash_remove(soc, peer);

		/* release the self AST entry before the peer goes away */
		qdf_spin_lock_bh(&soc->ast_lock);
		if (peer->self_ast_entry) {
			dp_peer_del_ast(soc, peer->self_ast_entry);
			peer->self_ast_entry = NULL;
		}
		qdf_spin_unlock_bh(&soc->ast_lock);

		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
			if (tmppeer == peer) {
				found = 1;
				break;
			}
		}

		if (found) {
			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
				     peer_list_elem);
		} else {
			/*Ignoring the remove operation as peer not found*/
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  "peer:%pK not found in vdev:%pK peerlist:%pK",
				  peer, vdev, &peer->vdev->peer_list);
		}

		/* send peer destroy event to upper layer */
		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		peer_cookie.ctx = NULL;
		peer_cookie.ctx = (void *)peer->wlanstats_ctx;
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
				     pdev->soc,
				     (void *)&peer_cookie,
				     peer->peer_ids[0],
				     WDI_NO_VAL,
				     pdev->pdev_id);
#endif
		peer->wlanstats_ctx = NULL;

		/* cleanup the peer data */
		dp_peer_cleanup(vdev, peer);
		/* drop the ref lock across the release path so control
		 * plane callbacks invoked from it do not run under it
		 */
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		dp_reset_and_release_peer_mem(soc, pdev, peer, vdev);
		qdf_spin_lock_bh(&soc->peer_ref_mutex);

		/* check whether the parent vdev has no peers left */
		if (TAILQ_EMPTY(&vdev->peer_list)) {
			/*
			 * capture vdev delete pending flag's status
			 * while holding peer_ref_mutex lock
			 */
			delete_vdev = vdev->delete.pending;
			/*
			 * Now that there are no references to the peer, we can
			 * release the peer reference lock.
			 */
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			/*
			 * Check if the parent vdev was waiting for its peers
			 * to be deleted, in order for it to be deleted too.
			 */
			if (delete_vdev)
				dp_delete_pending_vdev(pdev, vdev, vdev_id);
		} else {
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		}

	} else {
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	}
}
5656
#ifdef PEER_CACHE_RX_PKTS
/*
 * dp_peer_rx_bufq_resources_deinit() - release the peer's cached-rx queue
 * @peer: Datapath peer handle
 *
 * Flushes (drops) any rx packets still cached for the peer, then destroys
 * the cache queue and its spinlock.
 */
static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
{
	/* flush cached frames before the queue itself is destroyed */
	dp_rx_flush_rx_cached(peer, true);
	qdf_list_destroy(&peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
}
#else
/* rx packet caching disabled in this build: nothing to release */
static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
{
}
#endif
5669
/*
 * dp_peer_delete_wifi3() - Delete txrx peer
 * @peer_handle: Datapath peer handle
 * @bitmap: bitmap indicating special handling of request.
 *
 * Marks the peer invalid, releases its local id and rx-cache resources,
 * and drops the reference taken at peer_attach time.
 *
 * Return: void
 */
static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	/* redirect the peer's rx delivery function to point to a
	 * discard func
	 */

	peer->rx_opt_proc = dp_rx_discard;

	/* Do not make ctrl_peer to NULL for connected sta peers.
	 * We need ctrl_peer to release the reference during dp
	 * peer free. This reference was held for
	 * obj_mgr peer during the creation of dp peer.
	 */
	if (!(peer->vdev && (peer->vdev->opmode != wlan_op_mode_sta) &&
	      !peer->bss_peer))
		peer->ctrl_peer = NULL;

	/* rx paths check this flag before delivering to the peer */
	peer->valid = 0;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);

	dp_local_peer_id_free(peer->vdev->pdev, peer);

	dp_peer_rx_bufq_resources_deinit(peer);

	qdf_spinlock_destroy(&peer->peer_info_lock);

	/*
	 * Remove the reference added during peer_attach.
	 * The peer will still be left allocated until the
	 * PEER_UNMAP message arrives to remove the other
	 * reference, added by the PEER_MAP message.
	 */
	dp_peer_unref_delete(peer_handle);
}
5714
5715/*
5716 * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
5717 * @peer_handle: Datapath peer handle
5718 *
5719 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005720static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
Leo Chang5ea93a42016-11-03 12:39:49 -07005721{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005722 struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07005723 return vdev->mac_addr.raw;
5724}
5725
5726/*
Karunakar Dasinenica792542017-01-16 10:08:58 -08005727 * dp_vdev_set_wds() - Enable per packet stats
5728 * @vdev_handle: DP VDEV handle
5729 * @val: value
5730 *
5731 * Return: none
5732 */
5733static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
5734{
5735 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5736
5737 vdev->wds_enabled = val;
5738 return 0;
5739}
5740
5741/*
Leo Chang5ea93a42016-11-03 12:39:49 -07005742 * dp_get_vdev_from_vdev_id_wifi3() – Detach txrx peer
5743 * @peer_handle: Datapath peer handle
5744 *
5745 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005746static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
5747 uint8_t vdev_id)
Leo Chang5ea93a42016-11-03 12:39:49 -07005748{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005749 struct dp_pdev *pdev = (struct dp_pdev *)dev;
Leo Chang5ea93a42016-11-03 12:39:49 -07005750 struct dp_vdev *vdev = NULL;
5751
5752 if (qdf_unlikely(!pdev))
5753 return NULL;
5754
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05305755 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Leo Chang5ea93a42016-11-03 12:39:49 -07005756 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Mohit Khanna02553142019-04-11 17:49:27 -07005757 if (vdev->delete.pending)
5758 continue;
5759
Leo Chang5ea93a42016-11-03 12:39:49 -07005760 if (vdev->vdev_id == vdev_id)
5761 break;
5762 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05305763 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Leo Chang5ea93a42016-11-03 12:39:49 -07005764
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005765 return (struct cdp_vdev *)vdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07005766}
5767
chenguo2a733792018-11-01 16:10:38 +08005768/*
5769 * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
5770 * @dev: PDEV handle
5771 *
5772 * Return: VDEV handle of monitor mode
5773 */
5774
5775static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
5776{
5777 struct dp_pdev *pdev = (struct dp_pdev *)dev;
5778
5779 if (qdf_unlikely(!pdev))
5780 return NULL;
5781
5782 return (struct cdp_vdev *)pdev->monitor_vdev;
5783}
5784
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005785static int dp_get_opmode(struct cdp_vdev *vdev_handle)
Leo Chang5ea93a42016-11-03 12:39:49 -07005786{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005787 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07005788
5789 return vdev->opmode;
5790}
5791
Mohit Khanna7ac554b2018-05-24 11:58:13 -07005792static
5793void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
5794 ol_txrx_rx_fp *stack_fn_p,
5795 ol_osif_vdev_handle *osif_vdev_p)
5796{
5797 struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
5798
5799 qdf_assert(vdev);
5800 *stack_fn_p = vdev->osif_rx_stack;
5801 *osif_vdev_p = vdev->osif_vdev;
5802}
5803
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005804static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
Leo Chang5ea93a42016-11-03 12:39:49 -07005805{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005806 struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07005807 struct dp_pdev *pdev = vdev->pdev;
5808
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005809 return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
Leo Chang5ea93a42016-11-03 12:39:49 -07005810}
phadiman7821bf82018-02-06 16:03:54 +05305811
Kai Chen6eca1a62017-01-12 10:17:53 -08005812/**
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07005813 * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
5814 * ring based on target
5815 * @soc: soc handle
5816 * @mac_for_pdev: pdev_id
5817 * @pdev: physical device handle
5818 * @ring_num: mac id
5819 * @htt_tlv_filter: tlv filter
5820 *
5821 * Return: zero on success, non-zero on failure
5822 */
5823static inline
5824QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
5825 struct dp_pdev *pdev, uint8_t ring_num,
5826 struct htt_rx_ring_tlv_filter htt_tlv_filter)
5827{
5828 QDF_STATUS status;
5829
5830 if (soc->wlan_cfg_ctx->rxdma1_enable)
5831 status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5832 pdev->rxdma_mon_buf_ring[ring_num]
5833 .hal_srng,
5834 RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
5835 &htt_tlv_filter);
5836 else
5837 status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5838 pdev->rx_mac_buf_ring[ring_num]
5839 .hal_srng,
5840 RXDMA_BUF, RX_BUFFER_SIZE,
5841 &htt_tlv_filter);
5842
5843 return status;
5844}
5845
5846/**
sumedh baikady84613b02017-09-19 16:36:14 -07005847 * dp_reset_monitor_mode() - Disable monitor mode
5848 * @pdev_handle: Datapath PDEV handle
5849 *
Kai Chen52ef33f2019-03-05 18:33:40 -08005850 * Return: QDF_STATUS
sumedh baikady84613b02017-09-19 16:36:14 -07005851 */
Kai Chen52ef33f2019-03-05 18:33:40 -08005852QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
sumedh baikady84613b02017-09-19 16:36:14 -07005853{
5854 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5855 struct htt_rx_ring_tlv_filter htt_tlv_filter;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005856 struct dp_soc *soc = pdev->soc;
sumedh baikady84613b02017-09-19 16:36:14 -07005857 uint8_t pdev_id;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005858 int mac_id;
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07005859 QDF_STATUS status = QDF_STATUS_SUCCESS;
sumedh baikady84613b02017-09-19 16:36:14 -07005860
5861 pdev_id = pdev->pdev_id;
5862 soc = pdev->soc;
5863
Sumedh Baikady12b2b2c2018-03-05 16:50:58 -08005864 qdf_spin_lock_bh(&pdev->mon_lock);
5865
hangtianfe681a52019-01-16 17:16:28 +08005866 qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
sumedh baikady84613b02017-09-19 16:36:14 -07005867
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005868 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5869 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
sumedh baikady84613b02017-09-19 16:36:14 -07005870
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07005871 status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5872 pdev, mac_id,
5873 htt_tlv_filter);
5874
5875 if (status != QDF_STATUS_SUCCESS) {
5876 dp_err("Failed to send tlv filter for monitor mode rings");
5877 return status;
5878 }
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005879
5880 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07005881 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5882 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
5883 &htt_tlv_filter);
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005884 }
sumedh baikady84613b02017-09-19 16:36:14 -07005885
Sumedh Baikady12b2b2c2018-03-05 16:50:58 -08005886 pdev->monitor_vdev = NULL;
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05305887 pdev->mcopy_mode = 0;
Chaithanya Garrepalli65e6fc12018-12-21 19:17:33 +05305888 pdev->monitor_configured = false;
Sumedh Baikady12b2b2c2018-03-05 16:50:58 -08005889
5890 qdf_spin_unlock_bh(&pdev->mon_lock);
5891
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07005892 return QDF_STATUS_SUCCESS;
sumedh baikady84613b02017-09-19 16:36:14 -07005893}
phadiman7821bf82018-02-06 16:03:54 +05305894
5895/**
5896 * dp_set_nac() - set peer_nac
5897 * @peer_handle: Datapath PEER handle
5898 *
5899 * Return: void
5900 */
5901static void dp_set_nac(struct cdp_peer *peer_handle)
5902{
5903 struct dp_peer *peer = (struct dp_peer *)peer_handle;
5904
5905 peer->nac = 1;
5906}
5907
5908/**
5909 * dp_get_tx_pending() - read pending tx
5910 * @pdev_handle: Datapath PDEV handle
5911 *
5912 * Return: outstanding tx
5913 */
5914static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
5915{
5916 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5917
5918 return qdf_atomic_read(&pdev->num_tx_outstanding);
5919}
5920
5921/**
5922 * dp_get_peer_mac_from_peer_id() - get peer mac
5923 * @pdev_handle: Datapath PDEV handle
5924 * @peer_id: Peer ID
5925 * @peer_mac: MAC addr of PEER
5926 *
5927 * Return: void
5928 */
5929static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
5930 uint32_t peer_id, uint8_t *peer_mac)
5931{
5932 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5933 struct dp_peer *peer;
5934
5935 if (pdev && peer_mac) {
5936 peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05305937 if (peer) {
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07005938 qdf_mem_copy(peer_mac, peer->mac_addr.raw,
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -08005939 QDF_MAC_ADDR_SIZE);
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05305940 dp_peer_unref_del_find_by_id(peer);
phadiman7821bf82018-02-06 16:03:54 +05305941 }
5942 }
5943}
5944
/**
 * dp_pdev_configure_monitor_rings() - configure monitor rings
 * @pdev: Datapath PDEV handle
 *
 * Two-phase programming of the monitor path for every MAC of the pdev:
 *   1) monitor destination/buffer rings get a per-MPDU/MSDU + payload
 *      filter derived from the filter fields stored in @pdev;
 *   2) monitor status rings get a PPDU-level filter (no payload unless
 *      mcopy / rx enhanced capture is active).
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
{
	struct htt_rx_ring_tlv_filter htt_tlv_filter;
	struct dp_soc *soc;
	uint8_t pdev_id;
	int mac_id;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	soc = pdev->soc;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
		pdev->fp_ctrl_filter, pdev->fp_data_filter,
		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
		pdev->mo_data_filter);

	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));

	/* Phase 1: destination/buffer ring filter - all per-MPDU/MSDU
	 * TLVs plus the packet payload, no PPDU-level TLVs.
	 */
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.header_per_msdu = 1;
	htt_tlv_filter.enable_fp =
		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo =
		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;

	/* in mcopy mode data frames are delivered via the status ring,
	 * so keep them out of the destination ring
	 */
	if (pdev->mcopy_mode) {
		htt_tlv_filter.fp_data_filter = 0;
		htt_tlv_filter.mo_data_filter = 0;
	} else {
		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
		htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
	}
	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
	htt_tlv_filter.offset_valid = false;

	/* rx enhanced capture sources frames from the status ring: drop
	 * every frame class from the destination ring
	 */
	if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
		htt_tlv_filter.fp_mgmt_filter = 0;
		htt_tlv_filter.fp_ctrl_filter = 0;
		htt_tlv_filter.fp_data_filter = 0;
		htt_tlv_filter.mo_mgmt_filter = 0;
		htt_tlv_filter.mo_ctrl_filter = 0;
		htt_tlv_filter.mo_data_filter = 0;
	}

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
						     pdev, mac_id,
						     htt_tlv_filter);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send tlv filter for monitor mode rings");
			return status;
		}
	}

	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));

	/* Phase 2: status ring filter - PPDU-level TLVs only, unless
	 * mcopy / rx enhanced capture also needs packet headers here.
	 */
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 0;
	htt_tlv_filter.packet = 0;
	htt_tlv_filter.msdu_end = 0;
	htt_tlv_filter.mpdu_end = 0;
	if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
		htt_tlv_filter.mpdu_end = 1;
	}
	htt_tlv_filter.attention = 0;
	htt_tlv_filter.ppdu_start = 1;
	htt_tlv_filter.ppdu_end = 1;
	htt_tlv_filter.ppdu_end_user_stats = 1;
	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
	htt_tlv_filter.ppdu_end_status_done = 1;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 1;
	if (pdev->mcopy_mode ||
	    (pdev->rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
		htt_tlv_filter.packet_header = 1;
		if (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) {
			htt_tlv_filter.header_per_msdu = 0;
			htt_tlv_filter.enable_mo = 0;
		} else if (pdev->rx_enh_capture_mode ==
			   CDP_RX_ENH_CAPTURE_MPDU_MSDU) {
			htt_tlv_filter.header_per_msdu = 1;
			htt_tlv_filter.enable_mo = 0;
		}
	}

	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.offset_valid = false;

	/* NOTE(review): the status-ring cfg return value is not checked
	 * here (unlike the buffer-ring loop above) - confirm intentional.
	 */
	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
						pdev->pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
	}

	return status;
}
6078
/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @vdev_handle: Datapath VDEV handle
 * @special_monitor: non-zero for smart/lite monitor mode, in which case
 *                   ring and filter configuration is deferred (smart
 *                   monitor configures with the first NAC, lite monitor
 *                   via dp_set_pdev_param)
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *         QDF_STATUS_E_RESOURCES if a monitor vap is already configured
 */
static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
					   uint8_t special_monitor)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;

	qdf_assert(vdev);

	pdev = vdev->pdev;
	/* NOTE(review): monitor_vdev is overwritten before the
	 * monitor_configured check below; on the E_RESOURCES path it is
	 * left pointing at the new vdev - confirm this is intended.
	 */
	pdev->monitor_vdev = vdev;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
		pdev, pdev->pdev_id, pdev->soc, vdev);

	/*
	 * do not configure monitor buf ring and filter for smart and
	 * lite monitor
	 * for smart monitor filters are added along with first NAC
	 * for lite monitor required configuration done through
	 * dp_set_pdev_param
	 */
	if (special_monitor)
		return QDF_STATUS_SUCCESS;

	/*Check if current pdev's monitor_vdev exists */
	if (pdev->monitor_configured) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "monitor vap already created vdev=%pK\n", vdev);
		qdf_assert(vdev);
		return QDF_STATUS_E_RESOURCES;
	}

	pdev->monitor_configured = true;

	return dp_pdev_configure_monitor_rings(pdev);
}
6122
/**
 * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
 * @pdev_handle: Datapath PDEV handle
 * @filter_val: filter mode plus FP/MO mgmt/ctrl/data filter values to
 *              program into the pdev and the monitor rings
 *
 * Records the requested filter in @pdev, disables the monitor rings
 * with a zeroed filter, then reprograms the destination/buffer rings
 * with the new filter and the status rings with a PPDU-level filter.
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
				   struct cdp_monitor_filter *filter_val)
{
	/* Many monitor VAPs can exists in a system but only one can be up at
	 * anytime
	 */
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = pdev->monitor_vdev;
	struct htt_rx_ring_tlv_filter htt_tlv_filter;
	struct dp_soc *soc;
	uint8_t pdev_id;
	int mac_id;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	soc = pdev->soc;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
		pdev, pdev_id, soc, vdev);

	/*Check if current pdev's monitor_vdev exists */
	if (!pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"vdev=%pK", vdev);
		/* NOTE(review): execution continues after the assert on
		 * non-debug builds even with no monitor vdev - confirm.
		 */
		qdf_assert(vdev);
	}

	/* update filter mode, type in pdev structure */
	pdev->mon_filter_mode = filter_val->mode;
	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
	pdev->fp_data_filter = filter_val->fp_data;
	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
	pdev->mo_data_filter = filter_val->mo_data;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
		pdev->fp_ctrl_filter, pdev->fp_data_filter,
		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
		pdev->mo_data_filter);

	/* Phase 1: zeroed filter - quiesce both buffer and status rings
	 * before the new filter is applied.
	 */
	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
						     pdev, mac_id,
						     htt_tlv_filter);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send tlv filter for monitor mode rings");
			return status;
		}

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
	}

	/* Phase 2: destination/buffer ring filter built from the newly
	 * stored pdev filter fields.
	 */
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.header_per_msdu = 1;
	htt_tlv_filter.enable_fp =
		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo =
		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
	/* mcopy mode sources data frames from the status ring instead */
	if (pdev->mcopy_mode)
		htt_tlv_filter.fp_data_filter = 0;
	else
		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
	htt_tlv_filter.offset_valid = false;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
						     pdev, mac_id,
						     htt_tlv_filter);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send tlv filter for monitor mode rings");
			return status;
		}
	}

	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));

	/* Phase 3: status ring filter - PPDU-level TLVs only (plus packet
	 * headers when mcopy is active).
	 */
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 0;
	htt_tlv_filter.packet = 0;
	htt_tlv_filter.msdu_end = 0;
	htt_tlv_filter.mpdu_end = 0;
	htt_tlv_filter.attention = 0;
	htt_tlv_filter.ppdu_start = 1;
	htt_tlv_filter.ppdu_end = 1;
	htt_tlv_filter.ppdu_end_user_stats = 1;
	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
	htt_tlv_filter.ppdu_end_status_done = 1;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 1;
	if (pdev->mcopy_mode) {
		htt_tlv_filter.packet_header = 1;
	}
	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.offset_valid = false;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
						pdev->pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
	}

	return QDF_STATUS_SUCCESS;
}
Leo Chang5ea93a42016-11-03 12:39:49 -07006273
nobeljc8eb4d62018-01-04 14:29:32 -08006274/**
phadiman7821bf82018-02-06 16:03:54 +05306275 * dp_get_pdev_id_frm_pdev() - get pdev_id
6276 * @pdev_handle: Datapath PDEV handle
6277 *
6278 * Return: pdev_id
6279 */
6280static
6281uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
6282{
6283 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6284
6285 return pdev->pdev_id;
6286}
6287
6288/**
Varsha Mishraa331e6e2019-03-11 12:16:14 +05306289 * dp_get_delay_stats_flag() - get delay stats flag
6290 * @pdev_handle: Datapath PDEV handle
6291 *
6292 * Return: 0 if flag is disabled else 1
6293 */
6294static
6295bool dp_get_delay_stats_flag(struct cdp_pdev *pdev_handle)
6296{
6297 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6298
6299 return pdev->delay_stats_flag;
6300}
6301
6302/**
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -07006303 * dp_pdev_set_chan_noise_floor() - set channel noise floor
6304 * @pdev_handle: Datapath PDEV handle
6305 * @chan_noise_floor: Channel Noise Floor
6306 *
6307 * Return: void
6308 */
6309static
6310void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
6311 int16_t chan_noise_floor)
6312{
6313 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6314
6315 pdev->chan_noise_floor = chan_noise_floor;
6316}
6317
6318/**
nobeljc8eb4d62018-01-04 14:29:32 -08006319 * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
6320 * @vdev_handle: Datapath VDEV handle
6321 * Return: true on ucast filter flag set
6322 */
6323static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
6324{
6325 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6326 struct dp_pdev *pdev;
6327
6328 pdev = vdev->pdev;
6329
6330 if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
6331 (pdev->mo_data_filter & FILTER_DATA_UCAST))
6332 return true;
6333
6334 return false;
6335}
6336
6337/**
6338 * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
6339 * @vdev_handle: Datapath VDEV handle
6340 * Return: true on mcast filter flag set
6341 */
6342static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
6343{
6344 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6345 struct dp_pdev *pdev;
6346
6347 pdev = vdev->pdev;
6348
6349 if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
6350 (pdev->mo_data_filter & FILTER_DATA_MCAST))
6351 return true;
6352
6353 return false;
6354}
6355
6356/**
6357 * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
6358 * @vdev_handle: Datapath VDEV handle
6359 * Return: true on non data filter flag set
6360 */
6361static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
6362{
6363 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6364 struct dp_pdev *pdev;
6365
6366 pdev = vdev->pdev;
6367
6368 if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
6369 (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
6370 if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
6371 (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
6372 return true;
6373 }
6374 }
6375
6376 return false;
6377}
6378
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05306379#ifdef MESH_MODE_SUPPORT
Venkateswara Swamy Bandaruec4f8e62017-03-07 11:04:28 +05306380void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05306381{
6382 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6383
6384 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
Venkateswara Swamy Bandaru5caa83a2017-03-06 11:33:15 +05306385 FL("val %d"), val);
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05306386 vdev->mesh_vdev = val;
6387}
Venkateswara Swamy Bandaruec4f8e62017-03-07 11:04:28 +05306388
6389/*
6390 * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
6391 * @vdev_hdl: virtual device object
6392 * @val: value to be set
6393 *
6394 * Return: void
6395 */
6396void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
6397{
6398 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6399
6400 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6401 FL("val %d"), val);
6402 vdev->mesh_rx_filter = val;
6403}
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05306404#endif
6405
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05306406/**
6407 * dp_rx_bar_stats_cb(): BAR received stats callback
6408 * @soc: SOC handle
6409 * @cb_ctxt: Call back context
6410 * @reo_status: Reo status
6411 *
6412 * return: void
6413 */
6414void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
6415 union hal_reo_status *reo_status)
6416{
6417 struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
6418 struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
6419
Chaithanya Garrepalli291dfa02018-10-12 17:11:34 +05306420 if (!qdf_atomic_read(&soc->cmn_init_done))
6421 return;
6422
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05306423 if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
Venkata Sharath Chandra Manchalac61826c2019-05-14 22:24:25 -07006424 DP_PRINT_STATS("REO stats failure %d",
6425 queue_status->header.status);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306426 qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05306427 return;
6428 }
6429
6430 pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306431 qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05306432
6433}
6434
/**
 * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: caller-provided buffer the aggregate is written into
 *
 * Copies the vdev's own counters into @vdev_stats, then folds in the
 * stats of every peer attached to the vdev.
 *
 * return: void
 */
void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
			     struct cdp_vdev_stats *vdev_stats)
{
	struct dp_peer *peer = NULL;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	/* NOTE(review): soc is assigned but never read in this function
	 * (the WDI call below uses vdev->pdev->soc directly) - possibly
	 * dead; confirm before removing.
	 */
	soc = vdev->pdev->soc;

	/* start from the vdev's own counters ... */
	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));

	/* ... then accumulate every attached peer's stats */
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
		dp_update_vdev_stats(vdev_stats, peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
			     vdev_stats, vdev->vdev_id,
			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
#endif
}
6463
/**
 * dp_aggregate_pdev_stats() - Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Zeroes the pdev tx/rx/tx_i counters and re-accumulates them from
 * every vdev (and, transitively, every peer) on the pdev.
 *
 * Locking: takes soc->peer_ref_mutex and then pdev->vdev_list_lock
 * around the vdev-list walk; callers must not already hold either.
 *
 * Return: void
 */
void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc;
	/* scratch buffer reused for each vdev's aggregate */
	struct cdp_vdev_stats *vdev_stats =
			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to get alloc vdev stats");
		return;
	}

	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));

	/* in mcopy mode invalid-peer drops are counted at pdev level */
	if (pdev->mcopy_mode)
		DP_UPDATE_STATS(pdev, pdev->invalid_peer);

	soc = pdev->soc;
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		dp_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	qdf_mem_free(vdev_stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
#endif
}
6502
/**
 * dp_vdev_getstats() - get vdev packet level stats
 * @vdev_handle: Datapath VDEV handle
 * @stats: cdp network device stats structure (output)
 *
 * Aggregates the vdev's stats (under soc->peer_ref_mutex) and maps the
 * aggregate onto the generic cdp_dev_stats tx/rx packet, byte, error
 * and drop counters.
 *
 * Return: void
 */
static void dp_vdev_getstats(void *vdev_handle,
		struct cdp_dev_stats *stats)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct cdp_vdev_stats *vdev_stats;

	if (!vdev)
		return;

	pdev = vdev->pdev;
	if (!pdev)
		return;

	soc = pdev->soc;

	/* heap scratch buffer; freed before return below */
	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to get alloc vdev stats");
		return;
	}

	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	dp_aggregate_vdev_stats(vdev, vdev_stats);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;

	stats->tx_errors = vdev_stats->tx.tx_failed +
		vdev_stats->tx_i.dropped.dropped_pkt.num;
	stats->tx_dropped = stats->tx_errors;

	/* rx totals span unicast, multicast and broadcast counters */
	stats->rx_packets = vdev_stats->rx.unicast.num +
		vdev_stats->rx.multicast.num +
		vdev_stats->rx.bcast.num;
	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
		vdev_stats->rx.multicast.bytes +
		vdev_stats->rx.bcast.bytes;

	qdf_mem_free(vdev_stats);

}
6556
6557
/**
 * dp_pdev_getstats() - get pdev packet level stats
 * @pdev_handle: Datapath PDEV handle
 * @stats: cdp network device stats structure (output)
 *
 * Re-aggregates the pdev's stats from all its vdevs/peers, then maps
 * the aggregate onto the generic cdp_dev_stats counters.
 *
 * Return: void
 */
static void dp_pdev_getstats(void *pdev_handle,
		struct cdp_dev_stats *stats)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	dp_aggregate_pdev_stats(pdev);

	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;

	stats->tx_errors = pdev->stats.tx.tx_failed +
		pdev->stats.tx_i.dropped.dropped_pkt.num;
	stats->tx_dropped = stats->tx_errors;

	/* rx totals span unicast, multicast and broadcast counters */
	stats->rx_packets = pdev->stats.rx.unicast.num +
		pdev->stats.rx.multicast.num +
		pdev->stats.rx.bcast.num;
	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
		pdev->stats.rx.multicast.bytes +
		pdev->stats.rx.bcast.bytes;
}
6586
6587/**
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05306588 * dp_get_device_stats() - get interface level packet stats
6589 * @handle: device handle
6590 * @stats: cdp network device stats structure
6591 * @type: device type pdev/vdev
6592 *
6593 * Return: void
6594 */
6595static void dp_get_device_stats(void *handle,
6596 struct cdp_dev_stats *stats, uint8_t type)
6597{
6598 switch (type) {
6599 case UPDATE_VDEV_STATS:
6600 dp_vdev_getstats(handle, stats);
6601 break;
6602 case UPDATE_PDEV_STATS:
6603 dp_pdev_getstats(handle, stats);
6604 break;
6605 default:
6606 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6607 "apstats cannot be updated for this input "
Aditya Sathishded018e2018-07-02 16:25:21 +05306608 "type %d", type);
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05306609 break;
6610 }
6611
6612}
6613
Venkata Sharath Chandra Manchala3dfc6142019-05-01 17:21:40 -07006614const
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07006615char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
6616{
6617 switch (ring_type) {
6618 case REO_DST:
6619 return "Reo_dst";
6620 case REO_EXCEPTION:
6621 return "Reo_exception";
6622 case REO_CMD:
6623 return "Reo_cmd";
6624 case REO_REINJECT:
6625 return "Reo_reinject";
6626 case REO_STATUS:
6627 return "Reo_status";
6628 case WBM2SW_RELEASE:
6629 return "wbm2sw_release";
6630 case TCL_DATA:
6631 return "tcl_data";
6632 case TCL_CMD:
6633 return "tcl_cmd";
6634 case TCL_STATUS:
6635 return "tcl_status";
6636 case SW2WBM_RELEASE:
6637 return "sw2wbm_release";
6638 case RXDMA_BUF:
6639 return "Rxdma_buf";
6640 case RXDMA_DST:
6641 return "Rxdma_dst";
6642 case RXDMA_MONITOR_BUF:
6643 return "Rxdma_monitor_buf";
6644 case RXDMA_MONITOR_DESC:
6645 return "Rxdma_monitor_desc";
6646 case RXDMA_MONITOR_STATUS:
6647 return "Rxdma_monitor_status";
6648 default:
6649 dp_err("Invalid ring type");
6650 break;
6651 }
6652 return "Invalid";
6653}
sumedh baikady72b1c712017-08-24 12:11:46 -07006654
/*
 * dp_print_napi_stats(): NAPI stats
 * @soc - soc handle
 *
 * Thin wrapper: the HIF layer owns the NAPI contexts, so printing
 * is delegated to it.
 */
static void dp_print_napi_stats(struct dp_soc *soc)
{
	hif_print_napi_stats(soc->hif_handle);
}
6663
/**
 * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
 * @vdev: DP_VDEV handle
 *
 * Clears pdev, soc, vdev, per-peer and NAPI counters, and (when
 * per-packet info plus WDI events are compiled in) pushes the
 * final stats snapshots out via WDI before they are lost.
 *
 * Return:void
 */
static inline void
dp_txrx_host_stats_clr(struct dp_vdev *vdev)
{
	struct dp_peer *peer = NULL;

	if (!vdev || !vdev->pdev)
		return;

	DP_STATS_CLR(vdev->pdev);
	DP_STATS_CLR(vdev->pdev->soc);
	DP_STATS_CLR(vdev);

	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		/* NOTE(review): TAILQ_FOREACH never yields NULL, so this
		 * guard looks dead; if it ever fired, returning here would
		 * also skip the vdev-level WDI update below — confirm intent.
		 */
		if (!peer)
			return;
		DP_STATS_CLR(peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
		/* Publish the peer stats before they are cleared upstream */
		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
				     &peer->stats, peer->peer_ids[0],
				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
#endif
	}

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	/* Publish the vdev-level stats snapshot as well */
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
			     &vdev->stats, vdev->vdev_id,
			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
#endif
}
6702
/*
 * dp_get_host_peer_stats()- function to print peer stats
 * @pdev_handle: DP_PDEV handle
 * @mac_addr: mac address of the peer
 *
 * Looks the peer up by MAC on the given pdev, validates that it
 * really belongs to that pdev, then prints its stats and the
 * per-TID reorder-queue stats.
 *
 * Return: void
 */
static void
dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
{
	struct dp_peer *peer;
	uint8_t local_id;

	if (!mac_addr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid MAC address\n");
		return;
	}

	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
						      &local_id);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid peer\n", __func__);
		return;
	}

	/* Making sure the peer is for the specific pdev */
	if ((struct dp_pdev *)pdev_handle != peer->vdev->pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Peer is not for this pdev\n", __func__);
		return;
	}

	dp_print_peer_stats(peer);
	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
}
6741
/**
 * dp_txrx_stats_help() - Helper function for Txrx_Stats
 *
 * Prints the usage menu for the txrx_stats iwpriv command; each
 * line maps a numeric stats_option to the stats it selects.
 * (Option 19 is intentionally absent from the list.)
 *
 * Return: None
 */
static void dp_txrx_stats_help(void)
{
	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
	dp_info("stats_option:");
	dp_info("  1 -- HTT Tx Statistics");
	dp_info("  2 -- HTT Rx Statistics");
	dp_info("  3 -- HTT Tx HW Queue Statistics");
	dp_info("  4 -- HTT Tx HW Sched Statistics");
	dp_info("  5 -- HTT Error Statistics");
	dp_info("  6 -- HTT TQM Statistics");
	dp_info("  7 -- HTT TQM CMDQ Statistics");
	dp_info("  8 -- HTT TX_DE_CMN Statistics");
	dp_info("  9 -- HTT Tx Rate Statistics");
	dp_info(" 10 -- HTT Rx Rate Statistics");
	dp_info(" 11 -- HTT Peer Statistics");
	dp_info(" 12 -- HTT Tx SelfGen Statistics");
	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
	dp_info(" 15 -- HTT SRNG Statistics");
	dp_info(" 16 -- HTT SFM Info Statistics");
	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
	dp_info(" 18 -- HTT Peer List Details");
	dp_info(" 20 -- Clear Host Statistics");
	dp_info(" 21 -- Host Rx Rate Statistics");
	dp_info(" 22 -- Host Tx Rate Statistics");
	dp_info(" 23 -- Host Tx Statistics");
	dp_info(" 24 -- Host Rx Statistics");
	dp_info(" 25 -- Host AST Statistics");
	dp_info(" 26 -- Host SRNG PTR Statistics");
	dp_info(" 27 -- Host Mon Statistics");
	dp_info(" 28 -- Host REO Queue Statistics");
	dp_info(" 29 -- Host Soc cfg param Statistics");
	dp_info(" 30 -- Host pdev cfg param Statistics");
}
6781
6782/**
Ishank Jain1e7401c2017-02-17 15:38:39 +05306783 * dp_print_host_stats()- Function to print the stats aggregated at host
6784 * @vdev_handle: DP_VDEV handle
Ishank Jain1e7401c2017-02-17 15:38:39 +05306785 * @type: host stats type
6786 *
Ishank Jain1e7401c2017-02-17 15:38:39 +05306787 * Return: 0 on success, print error message in case of failure
6788 */
6789static int
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006790dp_print_host_stats(struct cdp_vdev *vdev_handle,
6791 struct cdp_txrx_stats_req *req)
Ishank Jain1e7401c2017-02-17 15:38:39 +05306792{
6793 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6794 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006795 enum cdp_host_txrx_stats type =
6796 dp_stats_mapping_table[req->stats][STATS_HOST];
Ishank Jain1e7401c2017-02-17 15:38:39 +05306797
6798 dp_aggregate_pdev_stats(pdev);
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05306799
Ishank Jain1e7401c2017-02-17 15:38:39 +05306800 switch (type) {
Ishank Jain6290a3c2017-03-21 10:49:39 +05306801 case TXRX_CLEAR_STATS:
6802 dp_txrx_host_stats_clr(vdev);
6803 break;
Ishank Jain1e7401c2017-02-17 15:38:39 +05306804 case TXRX_RX_RATE_STATS:
6805 dp_print_rx_rates(vdev);
6806 break;
6807 case TXRX_TX_RATE_STATS:
6808 dp_print_tx_rates(vdev);
6809 break;
6810 case TXRX_TX_HOST_STATS:
6811 dp_print_pdev_tx_stats(pdev);
6812 dp_print_soc_tx_stats(pdev->soc);
6813 break;
6814 case TXRX_RX_HOST_STATS:
6815 dp_print_pdev_rx_stats(pdev);
6816 dp_print_soc_rx_stats(pdev->soc);
6817 break;
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05306818 case TXRX_AST_STATS:
6819 dp_print_ast_stats(pdev->soc);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05306820 dp_print_peer_table(vdev);
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05306821 break;
sumedh baikady72b1c712017-08-24 12:11:46 -07006822 case TXRX_SRNG_PTR_STATS:
Kai Chen783e0382018-01-25 16:29:08 -08006823 dp_print_ring_stats(pdev);
6824 break;
6825 case TXRX_RX_MON_STATS:
6826 dp_print_pdev_rx_mon_stats(pdev);
6827 break;
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07006828 case TXRX_REO_QUEUE_STATS:
6829 dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
6830 break;
Venkata Sharath Chandra Manchalaf167af12018-10-09 20:23:02 -07006831 case TXRX_SOC_CFG_PARAMS:
6832 dp_print_soc_cfg_params(pdev->soc);
6833 break;
6834 case TXRX_PDEV_CFG_PARAMS:
6835 dp_print_pdev_cfg_params(pdev);
6836 break;
Pamidipati, Vijay37d107d2018-12-31 14:46:14 +05306837 case TXRX_NAPI_STATS:
6838 dp_print_napi_stats(pdev->soc);
Mohit Khannae5a6e942018-11-28 14:22:48 -08006839 case TXRX_SOC_INTERRUPT_STATS:
6840 dp_print_soc_interrupt_stats(pdev->soc);
Pamidipati, Vijay37d107d2018-12-31 14:46:14 +05306841 break;
Ishank Jain1e7401c2017-02-17 15:38:39 +05306842 default:
Venkata Sharath Chandra Manchala389c4e12018-10-03 12:13:33 -07006843 dp_info("Wrong Input For TxRx Host Stats");
6844 dp_txrx_stats_help();
Ishank Jain1e7401c2017-02-17 15:38:39 +05306845 break;
6846 }
6847 return 0;
6848}
6849
/*
 * dp_ppdu_ring_reset()- Reset PPDU Stats ring
 * @pdev: DP_PDEV handle
 *
 * Pushes an all-zero TLV filter to every monitor status ring of the
 * pdev, which stops PPDU stats TLV delivery from the target.
 *
 * Return: void
 */
static void
dp_ppdu_ring_reset(struct dp_pdev *pdev)
{
	struct htt_rx_ring_tlv_filter htt_tlv_filter;
	int mac_id;

	/* Zeroed filter == subscribe to nothing */
	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
							  pdev->pdev_id);

		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
	}
}
6873
/*
 * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
 * @pdev: DP_PDEV handle
 *
 * Builds a TLV filter subscribing to PPDU start/end and user-stats
 * TLVs (FP frames; MD/MO added for NAC monitoring and mcopy modes)
 * and programs it on every monitor status ring of the pdev.
 *
 * Return: void
 */
static void
dp_ppdu_ring_cfg(struct dp_pdev *pdev)
{
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int mac_id;

	/* Subscribe to PPDU-level TLVs only; per-MSDU TLVs stay off */
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 0;
	htt_tlv_filter.packet = 0;
	htt_tlv_filter.msdu_end = 0;
	htt_tlv_filter.mpdu_end = 0;
	htt_tlv_filter.attention = 0;
	htt_tlv_filter.ppdu_start = 1;
	htt_tlv_filter.ppdu_end = 1;
	htt_tlv_filter.ppdu_end_user_stats = 1;
	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
	htt_tlv_filter.ppdu_end_status_done = 1;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	/* HW-assisted NAC monitoring needs MD frames + packet headers */
	if (pdev->neighbour_peers_added &&
	    pdev->soc->hw_nac_monitor_support) {
		htt_tlv_filter.enable_md = 1;
		htt_tlv_filter.packet_header = 1;
	}
	/* mcopy additionally captures monitor-other frames */
	if (pdev->mcopy_mode) {
		htt_tlv_filter.packet_header = 1;
		htt_tlv_filter.enable_mo = 1;
	}
	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
	if (pdev->neighbour_peers_added &&
	    pdev->soc->hw_nac_monitor_support)
		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;

	htt_tlv_filter.offset_valid = false;

	/* Apply the same filter on each RXDMA monitor status ring */
	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
							  pdev->pdev_id);

		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
	}
}
6929
6930/*
Alok Singh40a622b2018-06-28 10:47:26 +05306931 * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
6932 * modes are enabled or not.
6933 * @dp_pdev: dp pdev handle.
6934 *
6935 * Return: bool
6936 */
6937static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
6938{
6939 if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
6940 !pdev->mcopy_mode)
6941 return true;
6942 else
6943 return false;
6944}
6945
/*
 * dp_set_bpr_enable() - API to enable/disable bpr feature
 * @pdev_handle: DP_PDEV handle.
 * @val: Provided value (CDP_BPR_DISABLE / CDP_BPR_ENABLE).
 *
 * Updates pdev->bpr_enable and re-sends the HTT ppdu-stats config
 * that matches the remaining set of active stats consumers
 * (enhanced stats, tx sniffer, mcopy, pktlog).
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS
dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	switch (val) {
	case CDP_BPR_DISABLE:
		pdev->bpr_enable = CDP_BPR_DISABLE;
		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
			/* No consumer left: turn ppdu stats off entirely */
			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
		} else if (pdev->enhanced_stats_en &&
			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
			   !pdev->pktlog_ppdu_stats) {
			/* Fall back to enhanced-stats-only config */
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_ENH_STATS,
						  pdev->pdev_id);
		}
		break;
	case CDP_BPR_ENABLE:
		pdev->bpr_enable = CDP_BPR_ENABLE;
		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR,
						  pdev->pdev_id);
		} else if (pdev->enhanced_stats_en &&
			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
			   !pdev->pktlog_ppdu_stats) {
			/* BPR combined with enhanced stats */
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR_ENH,
						  pdev->pdev_id);
		} else if (pdev->pktlog_ppdu_stats) {
			/* BPR combined with pktlog ppdu stats */
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
						  pdev->pdev_id);
		}
		break;
	default:
		break;
	}

	return QDF_STATUS_SUCCESS;
}
6997
6998/*
Varsha Mishra18281792019-03-06 17:57:23 +05306999 * dp_pdev_tid_stats_ingress_inc
7000 * @pdev: pdev handle
7001 * @val: increase in value
7002 *
7003 * Return: void
7004 */
7005static void
7006dp_pdev_tid_stats_ingress_inc(struct cdp_pdev *pdev, uint32_t val)
7007{
7008 struct dp_pdev *dp_pdev = (struct dp_pdev *)pdev;
7009
7010 dp_pdev->stats.tid_stats.ingress_stack += val;
7011}
7012
7013/*
7014 * dp_pdev_tid_stats_osif_drop
7015 * @pdev: pdev handle
7016 * @val: increase in value
7017 *
7018 * Return: void
7019 */
7020static void
7021dp_pdev_tid_stats_osif_drop(struct cdp_pdev *pdev, uint32_t val)
7022{
7023 struct dp_pdev *dp_pdev = (struct dp_pdev *)pdev;
7024
7025 dp_pdev->stats.tid_stats.osif_drop += val;
7026}
7027
/*
 * dp_config_debug_sniffer()- API to enable/disable debug sniffer
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value: 0 = off, 1 = tx sniffer, 2 = mcopy
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS
dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/* mcopy uses the monitor rings; tear monitor mode down first so
	 * the rings can be reconfigured for the requested mode.
	 */
	if (pdev->mcopy_mode)
		dp_reset_monitor_mode(pdev_handle);

	switch (val) {
	case 0:
		/* Disable both tx sniffer and mcopy */
		pdev->tx_sniffer_enable = 0;
		pdev->mcopy_mode = 0;
		pdev->monitor_configured = false;

		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
		    !pdev->bpr_enable) {
			/* No remaining consumer: stop ppdu stats delivery */
			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
			dp_ppdu_ring_reset(pdev);
		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev,
				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev,
				DP_PPDU_STATS_CFG_BPR_ENH,
				pdev->pdev_id);
		} else {
			dp_h2t_cfg_stats_msg_send(pdev,
				DP_PPDU_STATS_CFG_BPR,
				pdev->pdev_id);
		}
		break;

	case 1:
		/* Tx sniffer only */
		pdev->tx_sniffer_enable = 1;
		pdev->mcopy_mode = 0;
		pdev->monitor_configured = false;

		if (!pdev->pktlog_ppdu_stats)
			dp_h2t_cfg_stats_msg_send(pdev,
				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
		break;
	case 2:
		/* mcopy cannot coexist with an active monitor vdev */
		if (pdev->monitor_vdev) {
			status = QDF_STATUS_E_RESOURCES;
			break;
		}

		pdev->mcopy_mode = 1;
		dp_pdev_configure_monitor_rings(pdev);
		pdev->monitor_configured = true;
		pdev->tx_sniffer_enable = 0;

		if (!pdev->pktlog_ppdu_stats)
			dp_h2t_cfg_stats_msg_send(pdev,
				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid value");
		break;
	}
	return status;
}
7100
/*
 * dp_enable_enhanced_stats()- API to enable enhanced statistcs
 * @pdev_handle: DP_PDEV handle
 *
 * Starts the calibration-client timer on the first enable, configures
 * the ppdu status ring when no other monitor consumer owns it, and
 * requests the matching ppdu stats config from the target.
 *
 * Return: void
 */
static void
dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	/* Start the cal client only on a 0 -> 1 transition */
	if (pdev->enhanced_stats_en == 0)
		dp_cal_client_timer_start(pdev->cal_client_ctx);

	pdev->enhanced_stats_en = 1;

	/* Ring is already configured when mcopy/NAC/monitor vdev is active */
	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
	    !pdev->monitor_vdev)
		dp_ppdu_ring_cfg(pdev);

	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
		/* Keep BPR active alongside enhanced stats */
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_BPR_ENH,
					  pdev->pdev_id);
	}
}
7129
/*
 * dp_disable_enhanced_stats()- API to disable enhanced statistcs
 * @pdev_handle: DP_PDEV handle
 *
 * Stops the calibration-client timer on disable, downgrades the
 * target ppdu stats config to whatever consumers remain, and resets
 * the ppdu status ring if nothing else is using it.
 *
 * Return: void
 */
static void
dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	/* Stop the cal client only on a 1 -> 0 transition */
	if (pdev->enhanced_stats_en == 1)
		dp_cal_client_timer_stop(pdev->cal_client_ctx);

	pdev->enhanced_stats_en = 0;

	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
		/* No consumer left: stop ppdu stats delivery */
		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
		/* BPR remains the only consumer */
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_BPR,
					  pdev->pdev_id);
	}

	/* Ring is still needed by mcopy/NAC/monitor vdev if active */
	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
	    !pdev->monitor_vdev)
		dp_ppdu_ring_reset(pdev);
}
7158
/*
 * dp_get_fw_peer_stats()- function to print peer stats
 * @pdev_handle: DP_PDEV handle
 * @mac_addr: mac address of the peer
 * @cap: Type of htt stats requested
 * @is_wait: if set, wait on completion from firmware response
 *
 * Currently Supporting only MAC ID based requests Only
 *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
 *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
 *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
 *
 * Return: void
 */
static void
dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
		uint32_t cap, uint32_t is_wait)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	int i;
	uint32_t config_param0 = 0;
	uint32_t config_param1 = 0;
	uint32_t config_param2 = 0;
	uint32_t config_param3 = 0;

	/* MAC-address based request; bit (cap + 1) selects the mode */
	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
	config_param0 |= (1 << (cap + 1));

	/* Request every peer-stats TLV from the firmware */
	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
		config_param1 |= (1 << i);
	}

	/* Pack MAC octets 0-3 into param2 and octets 4-5 into param3,
	 * lowest octet in the least-significant byte.
	 */
	config_param2 |= (mac_addr[0] & 0x000000ff);
	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);

	config_param3 |= (mac_addr[4] & 0x000000ff);
	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);

	if (is_wait) {
		/* Synchronous path: block until the FW response event
		 * fires or the timeout elapses.
		 */
		qdf_event_reset(&pdev->fw_peer_stats_event);
		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
					  config_param0, config_param1,
					  config_param2, config_param3,
					  0, 1, 0);
		qdf_wait_single_event(&pdev->fw_peer_stats_event,
				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
	} else {
		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
					  config_param0, config_param1,
					  config_param2, config_param3,
					  0, 0, 0);
	}

}
7215
/* This struct definition will be removed from here
 * once it get added in FW headers*/
struct httstats_cmd_req {
	uint32_t config_param0;	/* passed through unchanged to */
	uint32_t config_param1;	/* dp_h2t_ext_stats_msg_send() */
	uint32_t config_param2;
	uint32_t config_param3;
	int cookie;		/* caller cookie forwarded with the request */
	u_int8_t stats_id;	/* HTT stats id forwarded with the request */
};
7226
7227/*
7228 * dp_get_htt_stats: function to process the httstas request
7229 * @pdev_handle: DP pdev handle
7230 * @data: pointer to request data
7231 * @data_len: length for request data
7232 *
7233 * return: void
7234 */
7235static void
7236dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
7237{
7238 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7239 struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
7240
7241 QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
7242 dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
7243 req->config_param0, req->config_param1,
7244 req->config_param2, req->config_param3,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08007245 req->cookie, 0, 0);
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05307246}
Vinay Adella873dc402018-05-28 12:06:34 +05307247
/*
 * dp_set_pdev_param: function to set parameters in pdev
 * @pdev_handle: DP pdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
				    enum cdp_pdev_param_type param,
				    uint8_t val)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	switch (param) {
	/* These cases delegate and propagate the callee's status */
	case CDP_CONFIG_DEBUG_SNIFFER:
		return dp_config_debug_sniffer(pdev_handle, val);
	case CDP_CONFIG_BPR_ENABLE:
		return dp_set_bpr_enable(pdev_handle, val);
	case CDP_CONFIG_PRIMARY_RADIO:
		pdev->is_primary = val;
		break;
	case CDP_CONFIG_CAPTURE_LATENCY:
		if (val == 1)
			pdev->latency_capture_enable = true;
		else
			pdev->latency_capture_enable = false;
		break;
	case CDP_INGRESS_STATS:
		dp_pdev_tid_stats_ingress_inc(pdev_handle, val);
		break;
	case CDP_OSIF_DROP:
		dp_pdev_tid_stats_osif_drop(pdev_handle, val);
		break;
	case CDP_CONFIG_ENH_RX_CAPTURE:
		return dp_config_enh_rx_capture(pdev_handle, val);
	case CDP_CONFIG_TX_CAPTURE:
		return dp_config_enh_tx_capture(pdev_handle, val);
	default:
		return QDF_STATUS_E_INVAL;
	}
	return QDF_STATUS_SUCCESS;
}
7290
7291/*
Varsha Mishraa331e6e2019-03-11 12:16:14 +05307292 * dp_calculate_delay_stats: function to get rx delay stats
7293 * @vdev_handle: DP vdev handle
7294 * @nbuf: skb
7295 *
7296 * Return: void
7297 */
7298static void dp_calculate_delay_stats(struct cdp_vdev *vdev_handle,
7299 qdf_nbuf_t nbuf)
7300{
7301 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7302
7303 dp_rx_compute_delay(vdev, nbuf);
7304}
7305
7306/*
phadiman4213e9c2018-10-29 12:50:02 +05307307 * dp_get_vdev_param: function to get parameters from vdev
7308 * @param: parameter type to get value
7309 *
7310 * return: void
7311 */
7312static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
7313 enum cdp_vdev_param_type param)
7314{
7315 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7316 uint32_t val;
7317
7318 switch (param) {
7319 case CDP_ENABLE_WDS:
7320 val = vdev->wds_enabled;
7321 break;
7322 case CDP_ENABLE_MEC:
7323 val = vdev->mec_enabled;
7324 break;
7325 case CDP_ENABLE_DA_WAR:
Nandha Kishore Easwaranf9c44ce2019-01-18 15:31:18 +05307326 val = vdev->pdev->soc->da_war_enabled;
phadiman4213e9c2018-10-29 12:50:02 +05307327 break;
7328 default:
7329 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7330 "param value %d is wrong\n",
7331 param);
7332 val = -1;
7333 break;
7334 }
7335
7336 return val;
7337}
7338
/*
 * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * return: void
 */
static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
		enum cdp_vdev_param_type param, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	switch (param) {
	case CDP_ENABLE_WDS:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "wds_enable %d for vdev(%p) id(%d)\n",
			  val, vdev, vdev->vdev_id);
		vdev->wds_enabled = val;
		break;
	case CDP_ENABLE_MEC:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "mec_enable %d for vdev(%p) id(%d)\n",
			  val, vdev, vdev->vdev_id);
		vdev->mec_enabled = val;
		break;
	case CDP_ENABLE_DA_WAR:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "da_war_enable %d for vdev(%p) id(%d)\n",
			  val, vdev, vdev->vdev_id);
		/* DA WAR is soc-wide; flush stale AST entries after toggling */
		vdev->pdev->soc->da_war_enabled = val;
		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
					     vdev->pdev->soc));
		break;
	case CDP_ENABLE_NAWDS:
		vdev->nawds_enabled = val;
		break;
	case CDP_ENABLE_MCAST_EN:
		vdev->mcast_enhancement_en = val;
		break;
	case CDP_ENABLE_PROXYSTA:
		vdev->proxysta_vdev = val;
		break;
	case CDP_UPDATE_TDLS_FLAGS:
		vdev->tdls_link_connected = val;
		break;
	case CDP_CFG_WDS_AGING_TIMER:
		/* val == 0 disables AST aging entirely; a new non-zero
		 * value re-arms the soc-level aging timer with that period.
		 */
		if (val == 0)
			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
		else if (val != vdev->wds_aging_timer_val)
			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);

		vdev->wds_aging_timer_val = val;
		break;
	case CDP_ENABLE_AP_BRIDGE:
		/* AP bridging is never enabled on an STA vdev */
		if (wlan_op_mode_sta != vdev->opmode)
			vdev->ap_bridge_enabled = val;
		else
			vdev->ap_bridge_enabled = false;
		break;
	case CDP_ENABLE_CIPHER:
		vdev->sec_type = val;
		break;
	case CDP_ENABLE_QWRAP_ISOLATION:
		vdev->isolation_vdev = val;
		break;
	default:
		break;
	}

	/* Several of the flags above influence the HW address-search
	 * configuration; recompute it unconditionally.
	 * NOTE(review): logs above use %p; %pK would avoid leaking kernel
	 * pointers in production logs -- confirm against QDF conventions.
	 */
	dp_tx_vdev_update_search_flags(vdev);
}
7409
7410/**
7411 * dp_peer_set_nawds: set nawds bit in peer
7412 * @peer_handle: pointer to peer
7413 * @value: enable/disable nawds
7414 *
7415 * return: void
7416 */
c_cgodavbd5b3c22017-06-07 12:31:40 +05307417static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
Ishank Jain9f174c62017-03-30 18:37:42 +05307418{
7419 struct dp_peer *peer = (struct dp_peer *)peer_handle;
7420 peer->nawds_enabled = value;
7421}
Ishank Jain1e7401c2017-02-17 15:38:39 +05307422
Ishank Jain949674c2017-02-27 17:09:29 +05307423/*
7424 * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
7425 * @vdev_handle: DP_VDEV handle
7426 * @map_id:ID of map that needs to be updated
7427 *
7428 * Return: void
7429 */
7430static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
7431 uint8_t map_id)
7432{
7433 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7434 vdev->dscp_tid_map_id = map_id;
7435 return;
7436}
7437
Surya Prakash Raajen3a01bdd2019-02-19 13:19:36 +05307438#ifdef DP_RATETABLE_SUPPORT
7439static int dp_txrx_get_ratekbps(int preamb, int mcs,
7440 int htflag, int gintval)
7441{
Amir Patelffe9a862019-02-28 14:13:12 +05307442 uint32_t rix;
7443
Surya Prakash Raajen3a01bdd2019-02-19 13:19:36 +05307444 return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
Amir Patelffe9a862019-02-28 14:13:12 +05307445 (uint8_t)preamb, 1, &rix);
Surya Prakash Raajen3a01bdd2019-02-19 13:19:36 +05307446}
7447#else
7448static int dp_txrx_get_ratekbps(int preamb, int mcs,
7449 int htflag, int gintval)
7450{
7451 return 0;
7452}
7453#endif
7454
Amir Patel756d05e2018-10-10 12:35:30 +05307455/* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
7456 * @peer_handle: DP pdev handle
7457 *
7458 * return : cdp_pdev_stats pointer
7459 */
7460static struct cdp_pdev_stats*
7461dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
7462{
7463 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7464
7465 dp_aggregate_pdev_stats(pdev);
7466
7467 return &pdev->stats;
7468}
7469
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05307470/* dp_txrx_get_peer_stats - will return cdp_peer_stats
7471 * @peer_handle: DP_PEER handle
7472 *
7473 * return : cdp_peer_stats pointer
7474 */
7475static struct cdp_peer_stats*
7476 dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
7477{
7478 struct dp_peer *peer = (struct dp_peer *)peer_handle;
7479
7480 qdf_assert(peer);
7481
7482 return &peer->stats;
7483}
7484
7485/* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
7486 * @peer_handle: DP_PEER handle
7487 *
7488 * return : void
7489 */
7490static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
7491{
7492 struct dp_peer *peer = (struct dp_peer *)peer_handle;
7493
7494 qdf_assert(peer);
7495
hangtianfe681a52019-01-16 17:16:28 +08007496 qdf_mem_zero(&peer->stats, sizeof(peer->stats));
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05307497}
7498
/* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
 * @vdev_handle: DP_VDEV handle
 * @buf: buffer for vdev stats (caller-owned struct cdp_vdev_stats)
 * @is_aggregate: true  - walk the peer list (under peer_ref_mutex) and
 *                        aggregate per-peer stats into @buf
 *                false - copy only the vdev's own counters into @buf
 *
 * return : int (0 on success, 1 if vdev/pdev is NULL)
 */
static int dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
				  bool is_aggregate)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct cdp_vdev_stats *vdev_stats;
	struct dp_pdev *pdev;
	struct dp_soc *soc;

	if (!vdev)
		return 1;

	pdev = vdev->pdev;
	if (!pdev)
		return 1;

	soc = pdev->soc;
	vdev_stats = (struct cdp_vdev_stats *)buf;

	if (is_aggregate) {
		/* peer_ref_mutex keeps the peer list stable while
		 * dp_aggregate_vdev_stats() walks it.
		 */
		qdf_spin_lock_bh(&soc->peer_ref_mutex);
		dp_aggregate_vdev_stats(vdev, buf);
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	} else {
		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
	}

	return 0;
}
7533
Prathyusha Guduri184b6402018-02-04 23:01:49 +05307534/*
Pranita Solanke92096e42018-09-11 11:14:51 +05307535 * dp_get_total_per(): get total per
7536 * @pdev_handle: DP_PDEV handle
7537 *
7538 * Return: % error rate using retries per packet and success packets
7539 */
7540static int dp_get_total_per(struct cdp_pdev *pdev_handle)
7541{
7542 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7543
7544 dp_aggregate_pdev_stats(pdev);
7545 if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
7546 return 0;
7547 return ((pdev->stats.tx.retries * 100) /
7548 ((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
7549}
7550
/*
 * dp_txrx_stats_publish(): publish pdev stats into a buffer
 * @pdev_handle: DP_PDEV handle
 * @buf: to hold pdev_stats (treated as struct cdp_pdev_stats)
 *
 * Triggers FW TX and RX pdev stats requests (cookie_val = 1 marks them
 * as internal requests), sleeps to let the HTT responses land in
 * pdev->stats, then copies the aggregate out to @buf.
 * NOTE: blocking call -- sleeps 2 * DP_MAX_SLEEP_TIME; must not be
 * invoked from atomic context.
 *
 * Return: int (TXRX_STATS_LEVEL)
 */
static int
dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, struct cdp_stats_extd *buf)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
	struct cdp_txrx_stats_req req = {0,};

	dp_aggregate_pdev_stats(pdev);
	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
	req.cookie_val = 1;
	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				  req.param1, req.param2, req.param3, 0,
				  req.cookie_val, 0);

	msleep(DP_MAX_SLEEP_TIME);

	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
	req.cookie_val = 1;
	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				  req.param1, req.param2, req.param3, 0,
				  req.cookie_val, 0);

	msleep(DP_MAX_SLEEP_TIME);
	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));

	return TXRX_STATS_LEVEL;
}
7585
Ishank Jain949674c2017-02-27 17:09:29 +05307586/**
7587 * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
7588 * @pdev: DP_PDEV handle
7589 * @map_id: ID of map that needs to be updated
7590 * @tos: index value in map
7591 * @tid: tid value passed by the user
7592 *
7593 * Return: void
7594 */
7595static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
7596 uint8_t map_id, uint8_t tos, uint8_t tid)
7597{
7598 uint8_t dscp;
7599 struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05307600 struct dp_soc *soc = pdev->soc;
7601
7602 if (!soc)
7603 return;
7604
Ishank Jain949674c2017-02-27 17:09:29 +05307605 dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
7606 pdev->dscp_tid_map[map_id][dscp] = tid;
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05307607
7608 if (map_id < soc->num_hw_dscp_tid_map)
7609 hal_tx_update_dscp_tid(soc->hal_soc, tid,
7610 map_id, dscp);
Ishank Jain949674c2017-02-27 17:09:29 +05307611 return;
7612}
7613
Ishank Jain6290a3c2017-03-21 10:49:39 +05307614/**
Shashikala Prabhu8f6703b2018-10-31 09:43:00 +05307615 * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
7616 * @pdev_handle: pdev handle
7617 * @val: hmmc-dscp flag value
7618 *
7619 * Return: void
7620 */
7621static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
7622 bool val)
7623{
7624 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7625
7626 pdev->hmmc_tid_override_en = val;
7627}
7628
7629/**
7630 * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
7631 * @pdev_handle: pdev handle
7632 * @tid: tid value
7633 *
7634 * Return: void
7635 */
7636static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
7637 uint8_t tid)
7638{
7639 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7640
7641 pdev->hmmc_tid = tid;
7642}
7643
/**
 * dp_fw_stats_process(): Process TxRX FW stats request
 * @vdev_handle: DP VDEV handle
 * @req: stats request (param0..param3 may be rewritten below before
 *       being forwarded to FW)
 *
 * return: int (0 on message send success, 1 if vdev is NULL,
 *         otherwise dp_h2t_ext_stats_msg_send() status)
 */
static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
			       struct cdp_txrx_stats_req *req)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = NULL;
	uint32_t stats = req->stats;
	uint8_t mac_id = req->mac_id;

	if (!vdev) {
		DP_TRACE(NONE, "VDEV not found");
		return 1;
	}
	pdev = vdev->pdev;

	/*
	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
	 * from param0 to param3 according to below rule:
	 *
	 * PARAM:
	 *   - config_param0 : start_offset (stats type)
	 *   - config_param1 : stats bmask from start offset
	 *   - config_param2 : stats bmask from start offset + 32
	 *   - config_param3 : stats bmask from start offset + 64
	 */
	if (req->stats == CDP_TXRX_STATS_0) {
		/* reset request: select PDEV_TX type, all bitmask bits set */
		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
		req->param1 = 0xFFFFFFFF;
		req->param2 = 0xFFFFFFFF;
		req->param3 = 0xFFFFFFFF;
	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
		/* MU stats are scoped to this vdev via a vdev-id mask */
		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
	}

	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
				req->param1, req->param2, req->param3,
				0, 0, mac_id);
}
7688
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307689/**
7690 * dp_txrx_stats_request - function to map to firmware and host stats
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007691 * @vdev: virtual handle
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307692 * @req: stats request
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007693 *
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007694 * Return: QDF_STATUS
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007695 */
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007696static
7697QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
7698 struct cdp_txrx_stats_req *req)
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007699{
7700 int host_stats;
7701 int fw_stats;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307702 enum cdp_stats stats;
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007703 int num_stats;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007704
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307705 if (!vdev || !req) {
7706 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7707 "Invalid vdev/req instance");
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007708 return QDF_STATUS_E_INVAL;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307709 }
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08007710
Venkata Sharath Chandra Manchala8d583a82019-04-21 12:32:24 -07007711 if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
7712 dp_err("Invalid mac id request");
7713 return QDF_STATUS_E_INVAL;
7714 }
7715
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307716 stats = req->stats;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007717 if (stats >= CDP_TXRX_MAX_STATS)
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007718 return QDF_STATUS_E_INVAL;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007719
Ishank Jain6290a3c2017-03-21 10:49:39 +05307720 /*
7721 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
7722 * has to be updated if new FW HTT stats added
7723 */
7724 if (stats > CDP_TXRX_STATS_HTT_MAX)
7725 stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007726
7727 num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
7728
7729 if (stats >= num_stats) {
7730 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7731 "%s: Invalid stats option: %d", __func__, stats);
7732 return QDF_STATUS_E_INVAL;
7733 }
7734
7735 req->stats = stats;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007736 fw_stats = dp_stats_mapping_table[stats][STATS_FW];
7737 host_stats = dp_stats_mapping_table[stats][STATS_HOST];
7738
Mohit Khanna3d1e1b72019-03-18 14:30:01 -07007739 dp_info("stats: %u fw_stats_type: %d host_stats: %d",
7740 stats, fw_stats, host_stats);
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007741
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05307742 if (fw_stats != TXRX_FW_STATS_INVALID) {
7743 /* update request with FW stats type */
7744 req->stats = fw_stats;
7745 return dp_fw_stats_process(vdev, req);
7746 }
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007747
Ishank Jain57c42a12017-04-12 10:42:22 +05307748 if ((host_stats != TXRX_HOST_STATS_INVALID) &&
7749 (host_stats <= TXRX_HOST_STATS_MAX))
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07007750 return dp_print_host_stats(vdev, req);
Ishank Jain57c42a12017-04-12 10:42:22 +05307751 else
7752 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7753 "Wrong Input for TxRx Stats");
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007754
Venkata Sharath Chandra Manchala599b14c2018-08-06 10:59:11 -07007755 return QDF_STATUS_SUCCESS;
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08007756}
7757
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007758/*
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007759 * dp_txrx_dump_stats() - Dump statistics
7760 * @value - Statistics option
7761 */
Mohit Khanna90d7ebd2017-09-12 21:54:21 -07007762static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
7763 enum qdf_stats_verbosity_level level)
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007764{
7765 struct dp_soc *soc =
7766 (struct dp_soc *)psoc;
7767 QDF_STATUS status = QDF_STATUS_SUCCESS;
7768
7769 if (!soc) {
7770 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7771 "%s: soc is NULL", __func__);
7772 return QDF_STATUS_E_INVAL;
7773 }
7774
7775 switch (value) {
7776 case CDP_TXRX_PATH_STATS:
7777 dp_txrx_path_stats(soc);
Mohit Khannae5a6e942018-11-28 14:22:48 -08007778 dp_print_soc_interrupt_stats(soc);
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007779 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007780
7781 case CDP_RX_RING_STATS:
7782 dp_print_per_ring_stats(soc);
7783 break;
7784
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007785 case CDP_TXRX_TSO_STATS:
7786 /* TODO: NOT IMPLEMENTED */
7787 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007788
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007789 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07007790 cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007791 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007792
psimha61b1a362017-07-27 15:45:49 -07007793 case CDP_DP_NAPI_STATS:
7794 dp_print_napi_stats(soc);
7795 break;
7796
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007797 case CDP_TXRX_DESC_STATS:
7798 /* TODO: NOT IMPLEMENTED */
7799 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07007800
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08007801 default:
7802 status = QDF_STATUS_E_INVAL;
7803 break;
7804 }
7805
7806 return status;
7807
7808}
7809
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07007810#ifdef QCA_LL_TX_FLOW_CONTROL_V2
7811/**
7812 * dp_update_flow_control_parameters() - API to store datapath
7813 * config parameters
7814 * @soc: soc handle
7815 * @cfg: ini parameter handle
7816 *
7817 * Return: void
7818 */
7819static inline
7820void dp_update_flow_control_parameters(struct dp_soc *soc,
7821 struct cdp_config_params *params)
7822{
7823 soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
7824 params->tx_flow_stop_queue_threshold;
7825 soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
7826 params->tx_flow_start_queue_offset;
7827}
7828#else
7829static inline
7830void dp_update_flow_control_parameters(struct dp_soc *soc,
7831 struct cdp_config_params *params)
7832{
7833}
7834#endif
7835
Mohit Khannae5a6e942018-11-28 14:22:48 -08007836#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
7837/* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
7838#define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
7839
7840/* Max packet limit for RX REAP Loop (dp_rx_process) */
7841#define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
7842
7843static
7844void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
7845 struct cdp_config_params *params)
7846{
7847 soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
7848 params->tx_comp_loop_pkt_limit;
7849
7850 if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
7851 soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
7852 else
7853 soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
7854
7855 soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
7856 params->rx_reap_loop_pkt_limit;
7857
7858 if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
7859 soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
7860 else
7861 soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
7862
7863 soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
7864 params->rx_hp_oos_update_limit;
7865
7866 dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
7867 soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
7868 soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
7869 soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
7870 soc->wlan_cfg_ctx->rx_enable_eol_data_check,
7871 soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
7872}
7873#else
7874static inline
7875void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
7876 struct cdp_config_params *params)
7877{ }
7878#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
7879
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07007880/**
7881 * dp_update_config_parameters() - API to store datapath
7882 * config parameters
7883 * @soc: soc handle
7884 * @cfg: ini parameter handle
7885 *
7886 * Return: status
7887 */
7888static
7889QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
7890 struct cdp_config_params *params)
7891{
7892 struct dp_soc *soc = (struct dp_soc *)psoc;
7893
7894 if (!(soc)) {
7895 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7896 "%s: Invalid handle", __func__);
7897 return QDF_STATUS_E_INVAL;
7898 }
7899
7900 soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
7901 soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
7902 soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
7903 soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
7904 params->tcp_udp_checksumoffload;
7905 soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
Mohit Khanna81179cb2018-08-16 20:50:43 -07007906 soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
Mohit Khanna16816ae2018-10-30 14:12:03 -07007907 soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
Mohit Khanna81179cb2018-08-16 20:50:43 -07007908
Mohit Khannae5a6e942018-11-28 14:22:48 -08007909 dp_update_rx_soft_irq_limit_params(soc, params);
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07007910 dp_update_flow_control_parameters(soc, params);
7911
7912 return QDF_STATUS_SUCCESS;
7913}
7914
Karunakar Dasinenica792542017-01-16 10:08:58 -08007915static struct cdp_wds_ops dp_ops_wds = {
7916 .vdev_set_wds = dp_vdev_set_wds,
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05307917#ifdef WDS_VENDOR_EXTENSION
7918 .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
7919 .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
7920#endif
Karunakar Dasinenica792542017-01-16 10:08:58 -08007921};
7922
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05307923/*
Kabilan Kannan60e3b302017-09-07 20:06:17 -07007924 * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
7925 * @vdev_handle - datapath vdev handle
7926 * @callback - callback function
7927 * @ctxt: callback context
7928 *
7929 */
7930static void
7931dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
7932 ol_txrx_data_tx_cb callback, void *ctxt)
7933{
7934 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7935
7936 vdev->tx_non_std_data_callback.func = callback;
7937 vdev->tx_non_std_data_callback.ctxt = ctxt;
7938}
7939
Santosh Anbu2280e862018-01-03 22:25:53 +05307940/**
7941 * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
7942 * @pdev_hdl: datapath pdev handle
7943 *
7944 * Return: opaque pointer to dp txrx handle
7945 */
7946static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
7947{
7948 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7949
7950 return pdev->dp_txrx_handle;
7951}
7952
7953/**
7954 * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
7955 * @pdev_hdl: datapath pdev handle
7956 * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
7957 *
7958 * Return: void
7959 */
7960static void
7961dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
7962{
7963 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
7964
7965 pdev->dp_txrx_handle = dp_txrx_hdl;
7966}
7967
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +05307968/**
7969 * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
7970 * @soc_handle: datapath soc handle
7971 *
7972 * Return: opaque pointer to external dp (non-core DP)
7973 */
7974static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
7975{
7976 struct dp_soc *soc = (struct dp_soc *)soc_handle;
7977
7978 return soc->external_txrx_handle;
7979}
7980
7981/**
7982 * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
7983 * @soc_handle: datapath soc handle
7984 * @txrx_handle: opaque pointer to external dp (non-core DP)
7985 *
7986 * Return: void
7987 */
7988static void
7989dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
7990{
7991 struct dp_soc *soc = (struct dp_soc *)soc_handle;
7992
7993 soc->external_txrx_handle = txrx_handle;
7994}
7995
Akshay Kosigia4f6e172018-09-03 21:42:27 +05307996/**
Padma Raghunathan93549e12019-02-28 14:30:55 +05307997 * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
7998 * @pdev_hdl: datapath pdev handle
7999 * @lmac_id: lmac id
8000 *
8001 * Return: void
8002 */
8003static void
8004dp_soc_map_pdev_to_lmac(struct cdp_pdev *pdev_hdl, uint32_t lmac_id)
8005{
8006 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8007 struct dp_soc *soc = pdev->soc;
8008
8009 pdev->lmac_id = lmac_id;
8010 wlan_cfg_set_hw_macid(soc->wlan_cfg_ctx,
8011 pdev->pdev_id,
8012 (lmac_id + 1));
8013}
8014
8015/**
Akshay Kosigia4f6e172018-09-03 21:42:27 +05308016 * dp_get_cfg_capabilities() - get dp capabilities
8017 * @soc_handle: datapath soc handle
8018 * @dp_caps: enum for dp capabilities
8019 *
8020 * Return: bool to determine if dp caps is enabled
8021 */
8022static bool
8023dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
8024 enum cdp_capabilities dp_caps)
8025{
8026 struct dp_soc *soc = (struct dp_soc *)soc_handle;
8027
8028 return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
8029}
8030
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +05308031#ifdef FEATURE_AST
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05308032static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
8033{
Amir Patelcb990262019-05-28 15:12:48 +05308034 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8035 struct dp_peer *peer = (struct dp_peer *)peer_hdl;
8036 struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05308037
Aditya Sathish6add3db2018-04-10 19:43:34 +05308038 /*
8039 * For BSS peer, new peer is not created on alloc_node if the
8040 * peer with same address already exists , instead refcnt is
8041 * increased for existing peer. Correspondingly in delete path,
8042 * only refcnt is decreased; and peer is only deleted , when all
8043 * references are deleted. So delete_in_progress should not be set
8044 * for bss_peer, unless only 2 reference remains (peer map reference
8045 * and peer hash table reference).
8046 */
Amir Patelcb990262019-05-28 15:12:48 +05308047 if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2))
Aditya Sathish6add3db2018-04-10 19:43:34 +05308048 return;
Aditya Sathish6add3db2018-04-10 19:43:34 +05308049
Chaithanya Garrepalli8fb48772019-01-21 23:11:18 +05308050 qdf_spin_lock_bh(&soc->ast_lock);
Karunakar Dasineni372647d2018-01-15 22:27:39 -08008051 peer->delete_in_progress = true;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05308052 dp_peer_delete_ast_entries(soc, peer);
Chaithanya Garrepalli8fb48772019-01-21 23:11:18 +05308053 qdf_spin_unlock_bh(&soc->ast_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05308054}
8055#endif
8056
Soumya Bhatbc719e62018-02-18 18:21:25 +05308057#ifdef ATH_SUPPORT_NAC_RSSI
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05308058/**
8059 * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
8060 * @vdev_hdl: DP vdev handle
8061 * @rssi: rssi value
8062 *
8063 * Return: 0 for success. nonzero for failure.
8064 */
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05308065static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
8066 char *mac_addr,
8067 uint8_t *rssi)
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05308068{
8069 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8070 struct dp_pdev *pdev = vdev->pdev;
8071 struct dp_neighbour_peer *peer = NULL;
8072 QDF_STATUS status = QDF_STATUS_E_FAILURE;
8073
8074 *rssi = 0;
8075 qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
8076 TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
8077 neighbour_peer_list_elem) {
8078 if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -08008079 mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05308080 *rssi = peer->rssi;
8081 status = QDF_STATUS_SUCCESS;
8082 break;
8083 }
8084 }
8085 qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
8086 return status;
8087}
8088
Soumya Bhatbc719e62018-02-18 18:21:25 +05308089static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
8090 enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
8091 uint8_t chan_num)
8092{
8093
8094 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8095 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
8096 struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
8097
8098 pdev->nac_rssi_filtering = 1;
8099 /* Store address of NAC (neighbour peer) which will be checked
8100 * against TA of received packets.
8101 */
8102
8103 if (cmd == CDP_NAC_PARAM_ADD) {
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05308104 dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
8105 client_macaddr);
Soumya Bhatbc719e62018-02-18 18:21:25 +05308106 } else if (cmd == CDP_NAC_PARAM_DEL) {
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05308107 dp_update_filter_neighbour_peers(vdev_handle,
8108 DP_NAC_PARAM_DEL,
8109 client_macaddr);
Soumya Bhatbc719e62018-02-18 18:21:25 +05308110 }
8111
8112 if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
8113 soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
Akshay Kosigi1a9c6d12018-04-26 00:54:23 +05308114 ((void *)vdev->pdev->ctrl_pdev,
8115 vdev->vdev_id, cmd, bssid);
Soumya Bhatbc719e62018-02-18 18:21:25 +05308116
8117 return QDF_STATUS_SUCCESS;
8118}
8119#endif
8120
Keyur Parekhc28f8392018-11-21 02:50:56 -08008121/**
8122 * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
8123 * for pktlog
8124 * @txrx_pdev_handle: cdp_pdev handle
8125 * @enb_dsb: Enable or disable peer based filtering
8126 *
8127 * Return: QDF_STATUS
8128 */
8129static int
8130dp_enable_peer_based_pktlog(
8131 struct cdp_pdev *txrx_pdev_handle,
8132 char *mac_addr, uint8_t enb_dsb)
8133{
8134 struct dp_peer *peer;
8135 uint8_t local_id;
8136 struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
8137
8138 peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
8139 mac_addr, &local_id);
8140
8141 if (!peer) {
8142 dp_err("Invalid Peer");
8143 return QDF_STATUS_E_FAILURE;
8144 }
8145
8146 peer->peer_based_pktlog_filter = enb_dsb;
8147 pdev->dp_peer_based_pktlog = enb_dsb;
8148
8149 return QDF_STATUS_SUCCESS;
8150}
8151
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07008152#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
Karunakar Dasinenid8c7ad22019-04-18 18:15:02 -07008153#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
8154/**
8155 * dp_summarize_tag_stats - sums up the given protocol type's counters
8156 * across all the rings and dumps the same
8157 * @pdev_handle: cdp_pdev handle
8158 * @protocol_type: protocol type for which stats should be displayed
8159 *
8160 * Return: none
8161 */
8162static uint64_t dp_summarize_tag_stats(struct cdp_pdev *pdev_handle,
8163 uint16_t protocol_type)
8164{
8165 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8166 uint8_t ring_idx;
8167 uint64_t total_tag_cnt = 0;
8168
8169 for (ring_idx = 0; ring_idx < MAX_REO_DEST_RINGS; ring_idx++) {
8170 total_tag_cnt +=
8171 pdev->reo_proto_tag_stats[ring_idx][protocol_type].tag_ctr;
8172 }
8173 total_tag_cnt += pdev->rx_err_proto_tag_stats[protocol_type].tag_ctr;
8174 DP_PRINT_STATS("ProtoID: %d, Tag: %u Tagged MSDU cnt: %llu",
8175 protocol_type,
8176 pdev->rx_proto_tag_map[protocol_type].tag,
8177 total_tag_cnt);
8178 return total_tag_cnt;
8179}
8180
8181/**
8182 * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
8183 * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
8184 * @pdev_handle: cdp_pdev handle
8185 * @protocol_type: protocol type for which stats should be displayed
8186 *
8187 * Return: none
8188 */
8189static void
8190dp_dump_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
8191 uint16_t protocol_type)
8192{
8193 uint16_t proto_idx;
8194
8195 if (protocol_type != RX_PROTOCOL_TAG_ALL &&
8196 protocol_type >= RX_PROTOCOL_TAG_MAX) {
8197 DP_PRINT_STATS("Invalid protocol type : %u", protocol_type);
8198 return;
8199 }
8200
8201 /* protocol_type in [0 ... RX_PROTOCOL_TAG_MAX] */
8202 if (protocol_type != RX_PROTOCOL_TAG_ALL) {
8203 dp_summarize_tag_stats(pdev_handle, protocol_type);
8204 return;
8205 }
8206
8207 /* protocol_type == RX_PROTOCOL_TAG_ALL */
8208 for (proto_idx = 0; proto_idx < RX_PROTOCOL_TAG_MAX; proto_idx++)
8209 dp_summarize_tag_stats(pdev_handle, proto_idx);
8210}
8211#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
8212
8213/**
8214 * dp_reset_pdev_rx_protocol_tag_stats - resets the stats counters for
8215 * given protocol type
8216 * @pdev_handle: cdp_pdev handle
8217 * @protocol_type: protocol type for which stats should be reset
8218 *
8219 * Return: none
8220 */
8221#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
8222static void
8223dp_reset_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
8224 uint16_t protocol_type)
8225{
8226 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8227 uint8_t ring_idx;
8228
8229 for (ring_idx = 0; ring_idx < MAX_REO_DEST_RINGS; ring_idx++)
8230 pdev->reo_proto_tag_stats[ring_idx][protocol_type].tag_ctr = 0;
8231 pdev->rx_err_proto_tag_stats[protocol_type].tag_ctr = 0;
8232}
8233#else
/* Stub: tag statistics are compiled out (WLAN_SUPPORT_RX_TAG_STATISTICS
 * undefined), so there is nothing to reset.
 */
static void
dp_reset_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
				    uint16_t protocol_type)
{
	/* Stub API */
}
8240#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
8241
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07008242/**
8243 * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
8244 * applied to the desired protocol type packets
8245 * @txrx_pdev_handle: cdp_pdev handle
8246 * @enable_rx_protocol_tag - bitmask that indicates what protocol types
8247 * are enabled for tagging. zero indicates disable feature, non-zero indicates
8248 * enable feature
8249 * @protocol_type: new protocol type for which the tag is being added
8250 * @tag: user configured tag for the new protocol
8251 *
8252 * Return: QDF_STATUS
8253 */
8254static QDF_STATUS
8255dp_update_pdev_rx_protocol_tag(struct cdp_pdev *pdev_handle,
8256 uint32_t enable_rx_protocol_tag,
8257 uint16_t protocol_type,
8258 uint16_t tag)
8259{
8260 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8261 /*
8262 * dynamically enable/disable tagging based on enable_rx_protocol_tag
8263 * flag.
8264 */
8265 if (enable_rx_protocol_tag) {
8266 /* Tagging for one or more protocols has been set by user */
Karunakar Dasinenid8c7ad22019-04-18 18:15:02 -07008267 pdev->is_rx_protocol_tagging_enabled = true;
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07008268 } else {
8269 /*
8270 * No protocols being tagged, disable feature till next add
8271 * operation
8272 */
Karunakar Dasinenid8c7ad22019-04-18 18:15:02 -07008273 pdev->is_rx_protocol_tagging_enabled = false;
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07008274 }
8275
Karunakar Dasinenid8c7ad22019-04-18 18:15:02 -07008276 /** Reset stats counter across all rings for given protocol */
8277 dp_reset_pdev_rx_protocol_tag_stats(pdev_handle, protocol_type);
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07008278
8279 pdev->rx_proto_tag_map[protocol_type].tag = tag;
8280
8281 return QDF_STATUS_SUCCESS;
8282}
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07008283#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
8284
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05308285static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05308286 uint32_t max_peers,
Tallapragada Kalyana7023622018-12-03 19:29:52 +05308287 uint32_t max_ast_index,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05308288 bool peer_map_unmap_v2)
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05308289{
8290 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8291
8292 soc->max_peers = max_peers;
8293
Tallapragada Kalyana7023622018-12-03 19:29:52 +05308294 qdf_print ("%s max_peers %u, max_ast_index: %u\n",
8295 __func__, max_peers, max_ast_index);
8296 wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05308297
8298 if (dp_peer_find_attach(soc))
8299 return QDF_STATUS_E_FAILURE;
8300
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05308301 soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
8302
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05308303 return QDF_STATUS_SUCCESS;
8304}
8305
Sravan Kumar Kairam5a6f5902018-07-04 17:32:24 +05308306/**
8307 * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
8308 * @dp_pdev: dp pdev handle
8309 * @ctrl_pdev: UMAC ctrl pdev handle
8310 *
8311 * Return: void
8312 */
8313static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
8314 struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
8315{
8316 struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
8317
8318 pdev->ctrl_pdev = ctrl_pdev;
8319}
8320
Amir Patel468bded2019-03-21 11:42:31 +05308321static void dp_set_rate_stats_cap(struct cdp_soc_t *soc_hdl,
8322 uint8_t val)
8323{
8324 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8325
8326 soc->wlanstats_enabled = val;
8327}
8328
8329static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
8330 void *stats_ctx)
8331{
8332 struct dp_soc *soc = (struct dp_soc *)soc_handle;
8333
8334 soc->rate_stats_ctx = stats_ctx;
8335}
8336
8337#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8338static void dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
8339 struct cdp_pdev *pdev_hdl)
8340{
8341 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8342 struct dp_soc *soc = (struct dp_soc *)pdev->soc;
8343 struct dp_vdev *vdev = NULL;
8344 struct dp_peer *peer = NULL;
8345
8346 qdf_spin_lock_bh(&soc->peer_ref_mutex);
8347 qdf_spin_lock_bh(&pdev->vdev_list_lock);
8348 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
8349 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
8350 if (peer)
8351 dp_wdi_event_handler(
8352 WDI_EVENT_FLUSH_RATE_STATS_REQ,
8353 pdev->soc, peer->wlanstats_ctx,
8354 peer->peer_ids[0],
8355 WDI_NO_VAL, pdev->pdev_id);
8356 }
8357 }
8358 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
8359 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
8360}
8361#else
/* Stub: per-peer rate stats flushing compiled out (FEATURE_PERPKT_INFO or
 * WDI_EVENT_ENABLE not set).
 */
static inline void
dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
			struct cdp_pdev *pdev_hdl)
{
}
8367#endif
8368
8369#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8370static void dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
8371 struct cdp_pdev *pdev_handle,
8372 void *buf)
8373{
8374 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8375
8376 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
8377 pdev->soc, buf, HTT_INVALID_PEER,
8378 WDI_NO_VAL, pdev->pdev_id);
8379}
8380#else
/* Stub: peer rate-stats flush events compiled out. */
static inline void
dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
			 struct cdp_pdev *pdev_handle,
			 void *buf)
{
}
8387#endif
8388
8389static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
8390{
8391 struct dp_soc *soc = (struct dp_soc *)soc_handle;
8392
8393 return soc->rate_stats_ctx;
8394}
8395
jitiphil60ac9aa2018-10-05 19:54:04 +05308396/*
8397 * dp_get_cfg() - get dp cfg
8398 * @soc: cdp soc handle
8399 * @cfg: cfg enum
8400 *
8401 * Return: cfg value
8402 */
8403static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
8404{
8405 struct dp_soc *dpsoc = (struct dp_soc *)soc;
8406 uint32_t value = 0;
8407
8408 switch (cfg) {
8409 case cfg_dp_enable_data_stall:
8410 value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
8411 break;
8412 case cfg_dp_enable_ip_tcp_udp_checksum_offload:
8413 value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
8414 break;
8415 case cfg_dp_tso_enable:
8416 value = dpsoc->wlan_cfg_ctx->tso_enabled;
8417 break;
8418 case cfg_dp_lro_enable:
8419 value = dpsoc->wlan_cfg_ctx->lro_enabled;
8420 break;
8421 case cfg_dp_gro_enable:
8422 value = dpsoc->wlan_cfg_ctx->gro_enabled;
8423 break;
8424 case cfg_dp_tx_flow_start_queue_offset:
8425 value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
8426 break;
8427 case cfg_dp_tx_flow_stop_queue_threshold:
8428 value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
8429 break;
8430 case cfg_dp_disable_intra_bss_fwd:
8431 value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
8432 break;
8433 default:
8434 value = 0;
8435 }
8436
8437 return value;
8438}
8439
Shashikala Prabhu550e69c2019-03-13 17:41:17 +05308440#ifdef CONFIG_WIN
8441/**
8442 * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
8443 * @pdev_hdl: datapath pdev handle
8444 * @param: ol ath params
8445 * @value: value of the flag
8446 * @buff: Buffer to be passed
8447 *
8448 * Implemented this function same as legacy function. In legacy code, single
8449 * function is used to display stats and update pdev params.
8450 *
8451 * Return: 0 for success. nonzero for failure.
8452 */
8453static uint32_t dp_tx_flow_ctrl_configure_pdev(void *pdev_handle,
8454 enum _ol_ath_param_t param,
8455 uint32_t value, void *buff)
8456{
8457 struct dp_soc *soc = NULL;
8458 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8459
8460 if (qdf_unlikely(!pdev))
8461 return 1;
8462
8463 soc = pdev->soc;
8464 if (!soc)
8465 return 1;
8466
8467 switch (param) {
8468 case OL_ATH_PARAM_VIDEO_DELAY_STATS_FC:
8469 if (value)
8470 pdev->delay_stats_flag = true;
8471 else
8472 pdev->delay_stats_flag = false;
8473 break;
8474 case OL_ATH_PARAM_VIDEO_STATS_FC:
8475 qdf_print("------- TID Stats ------\n");
8476 dp_pdev_print_tid_stats(pdev);
8477 qdf_print("------ Delay Stats ------\n");
8478 dp_pdev_print_delay_stats(pdev);
8479 break;
8480 case OL_ATH_PARAM_TOTAL_Q_SIZE:
8481 {
8482 uint32_t tx_min, tx_max;
8483
8484 tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
8485 tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
8486
8487 if (!buff) {
8488 if ((value >= tx_min) && (value <= tx_max)) {
8489 pdev->num_tx_allowed = value;
8490 } else {
8491 QDF_TRACE(QDF_MODULE_ID_DP,
8492 QDF_TRACE_LEVEL_INFO,
8493 "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
8494 tx_min, tx_max);
8495 break;
8496 }
8497 } else {
8498 *(int *)buff = pdev->num_tx_allowed;
8499 }
8500 }
8501 break;
8502 default:
8503 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8504 "%s: not handled param %d ", __func__, param);
8505 break;
8506 }
8507
8508 return 0;
8509}
8510#endif
8511
Debasis Dasc39a68d2019-01-28 17:02:06 +05308512/**
8513 * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
8514 * @vdev: DP_PDEV handle
8515 * @pcp: pcp value
8516 * @tid: tid value passed by the user
8517 *
8518 * Return: QDF_STATUS_SUCCESS on success
8519 */
8520static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
8521 uint8_t pcp, uint8_t tid)
8522{
8523 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8524 struct dp_soc *soc = pdev->soc;
8525
8526 soc->pcp_tid_map[pcp] = tid;
8527
8528 hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
8529 return QDF_STATUS_SUCCESS;
8530}
8531
8532/**
8533 * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
8534 * @vdev: DP_PDEV handle
8535 * @prio: tidmap priority value passed by the user
8536 *
8537 * Return: QDF_STATUS_SUCCESS on success
8538 */
8539static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct cdp_pdev *pdev_handle,
8540 uint8_t prio)
8541{
8542 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8543 struct dp_soc *soc = pdev->soc;
8544
8545 soc->tidmap_prty = prio;
8546
8547 hal_tx_set_tidmap_prty(soc->hal_soc, prio);
8548 return QDF_STATUS_SUCCESS;
8549}
8550
8551/**
8552 * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
8553 * @vdev: DP_VDEV handle
8554 * @pcp: pcp value
8555 * @tid: tid value passed by the user
8556 *
8557 * Return: QDF_STATUS_SUCCESS on success
8558 */
8559static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
8560 uint8_t pcp, uint8_t tid)
8561{
8562 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8563
8564 vdev->pcp_tid_map[pcp] = tid;
8565
8566 return QDF_STATUS_SUCCESS;
8567}
8568
8569/**
8570 * dp_set_vdev_tidmap_tbl_id_wifi3(): update tidmapi tbl id in vdev
8571 * @vdev: DP_VDEV handle
8572 * @mapid: map_id value passed by the user
8573 *
8574 * Return: QDF_STATUS_SUCCESS on success
8575 */
8576static QDF_STATUS dp_set_vdev_tidmap_tbl_id_wifi3(struct cdp_vdev *vdev_handle,
8577 uint8_t mapid)
8578{
8579 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8580
8581 vdev->tidmap_tbl_id = mapid;
8582
8583 return QDF_STATUS_SUCCESS;
8584}
8585
8586/**
8587 * dp_set_vdev_tidmap_prty_wifi3(): update tidmap priority in vdev
8588 * @vdev: DP_VDEV handle
8589 * @prio: tidmap priority value passed by the user
8590 *
8591 * Return: QDF_STATUS_SUCCESS on success
8592 */
8593static QDF_STATUS dp_set_vdev_tidmap_prty_wifi3(struct cdp_vdev *vdev_handle,
8594 uint8_t prio)
8595{
8596 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8597
8598 vdev->tidmap_prty = prio;
8599
8600 return QDF_STATUS_SUCCESS;
8601}
8602
/* Common CDP ops table: lifecycle (soc/pdev/vdev/peer attach-detach),
 * AST management, BA/ADDBA handling, stats, and misc soc-level hooks.
 */
static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_delete = dp_peer_delete_wifi3,
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_vdev_flush_peers = dp_vdev_flush_peers,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
	.flush_cache_rx_queue = NULL,
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
	.set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
	.txrx_get_vow_config_frm_pdev = dp_get_delay_stats_flag,
	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
	.txrx_set_nac = dp_set_nac,
	.txrx_get_tx_pending = dp_get_tx_pending,
	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.set_pn_check = dp_set_pn_check_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.tx_send = dp_tx_send,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
	.txrx_get_os_rx_handles_from_vdev =
					dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,

	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_pdev_tidmap_prty = dp_set_pdev_tidmap_prty_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
	.set_vdev_tidmap_prty = dp_set_vdev_tidmap_prty_wifi3,
	.set_vdev_tidmap_tbl_id = dp_set_vdev_tidmap_tbl_id_wifi3,

	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
};
8701
/* Control-path CDP ops: per-vdev/pdev parameter set/get, WDI event
 * subscription, NAC RSSI and RX protocol tagging hooks.
 */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
#ifdef MESH_MODE_SUPPORT
	.txrx_set_mesh_mode = dp_peer_set_mesh_mode,
	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_peer_set_nawds = dp_peer_set_nawds,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
	.txrx_get_sec_type = dp_get_sec_type,
	/* TODO: Add other functions */
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
#ifdef WDI_EVENT_ENABLE
	.txrx_get_pldev = dp_get_pldev,
#endif
	.txrx_set_pdev_param = dp_set_pdev_param,
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
				dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
};
8741
/* Mcast-enhancement ops (descriptor pool + mcast-to-ucast conversion) */
static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
};
8749
/* Monitor-mode ops; set-filter hooks are intentionally NULL here */
static struct cdp_mon_ops dp_ops_mon = {
	.txrx_monitor_set_filter_ucast_data = NULL,
	.txrx_monitor_set_filter_mcast_data = NULL,
	.txrx_monitor_set_filter_non_data = NULL,
	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
};
8761
/* Host statistics ops (per-peer/vdev/pdev stats query, publish, reset) */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.configure_rate_stats = dp_set_rate_stats_cap,
	/* TODO */
};
8777
/* Raw-mode ops: placeholder, no hooks implemented yet */
static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
8781
8782#ifdef CONFIG_WIN
/* Pdev flow-control ops; positional init maps the single member to
 * dp_tx_flow_ctrl_configure_pdev.
 */
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
8786#endif /* CONFIG_WIN */
8787
Yue Ma245b47b2017-02-21 16:35:31 -08008788#ifdef FEATURE_RUNTIME_PM
8789/**
8790 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
8791 * @opaque_pdev: DP pdev context
8792 *
8793 * DP is ready to runtime suspend if there are no pending TX packets.
8794 *
8795 * Return: QDF_STATUS
8796 */
8797static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
8798{
8799 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
8800 struct dp_soc *soc = pdev->soc;
8801
Yue Maaf4272d2018-08-27 12:35:21 -07008802 /* Abort if there are any pending TX packets */
8803 if (dp_get_tx_pending(opaque_pdev) > 0) {
8804 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8805 FL("Abort suspend due to pending TX packets"));
8806 return QDF_STATUS_E_AGAIN;
8807 }
Yue Ma245b47b2017-02-21 16:35:31 -08008808
8809 if (soc->intr_mode == DP_INTR_POLL)
8810 qdf_timer_stop(&soc->int_timer);
8811
8812 return QDF_STATUS_SUCCESS;
8813}
8814
8815/**
8816 * dp_runtime_resume() - ensure DP is ready to runtime resume
8817 * @opaque_pdev: DP pdev context
8818 *
8819 * Resume DP for runtime PM.
8820 *
8821 * Return: QDF_STATUS
8822 */
8823static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
8824{
8825 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
8826 struct dp_soc *soc = pdev->soc;
8827 void *hal_srng;
8828 int i;
8829
8830 if (soc->intr_mode == DP_INTR_POLL)
8831 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
8832
8833 for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
8834 hal_srng = soc->tcl_data_ring[i].hal_srng;
8835 if (hal_srng) {
8836 /* We actually only need to acquire the lock */
8837 hal_srng_access_start(soc->hal_soc, hal_srng);
8838 /* Update SRC ring head pointer for HW to send
8839 all pending packets */
8840 hal_srng_access_end(soc->hal_soc, hal_srng);
8841 }
8842 }
8843
8844 return QDF_STATUS_SUCCESS;
8845}
8846#endif /* FEATURE_RUNTIME_PM */
8847
Sravan Kumar Kairamc71219e2019-04-19 22:08:16 +05308848/**
8849 * dp_tx_get_success_ack_stats() - get tx success completion count
8850 * @opaque_pdev: dp pdev context
8851 * @vdevid: vdev identifier
8852 *
8853 * Return: tx success ack count
8854 */
8855static uint32_t dp_tx_get_success_ack_stats(struct cdp_pdev *pdev,
8856 uint8_t vdev_id)
8857{
8858 struct dp_vdev *vdev =
8859 (struct dp_vdev *)dp_get_vdev_from_vdev_id_wifi3(pdev,
8860 vdev_id);
8861 struct dp_soc *soc = ((struct dp_pdev *)pdev)->soc;
8862 struct cdp_vdev_stats *vdev_stats = NULL;
8863 uint32_t tx_success;
8864
8865 if (!vdev) {
8866 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8867 FL("Invalid vdev id %d"), vdev_id);
8868 return 0;
8869 }
8870
8871 vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
8872 if (!vdev_stats) {
8873 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8874 "DP alloc failure - unable to get alloc vdev stats");
8875 return 0;
8876 }
8877
8878 qdf_spin_lock_bh(&soc->peer_ref_mutex);
8879 dp_aggregate_vdev_stats(vdev, vdev_stats);
8880 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
8881
8882 tx_success = vdev_stats->tx.tx_success.num;
8883 qdf_mem_free(vdev_stats);
8884
8885 return tx_success;
8886}
8887
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05308888#ifndef CONFIG_WIN
/* Miscellaneous ops: TDLS TX, opmode query, runtime PM, pktlog service */
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.pkt_log_init = dp_pkt_log_init,
	.pkt_log_con_service = dp_pkt_log_con_service,
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
};
8903
/* TX flow-control (v2) ops; only populated when flow control is built in */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};
8914
/* Legacy (LL) flow-control ops - intentionally empty for WIFI 3.0 DP */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
8918
Yun Parkfde6b9e2017-06-26 17:13:11 -07008919#ifdef IPA_OFFLOAD
Leo Chang5ea93a42016-11-03 12:39:49 -07008920static struct cdp_ipa_ops dp_ops_ipa = {
Yun Parkfde6b9e2017-06-26 17:13:11 -07008921 .ipa_get_resource = dp_ipa_get_resource,
8922 .ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
8923 .ipa_op_response = dp_ipa_op_response,
8924 .ipa_register_op_cb = dp_ipa_register_op_cb,
8925 .ipa_get_stat = dp_ipa_get_stat,
8926 .ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
8927 .ipa_enable_autonomy = dp_ipa_enable_autonomy,
8928 .ipa_disable_autonomy = dp_ipa_disable_autonomy,
8929 .ipa_setup = dp_ipa_setup,
8930 .ipa_cleanup = dp_ipa_cleanup,
8931 .ipa_setup_iface = dp_ipa_setup_iface,
8932 .ipa_cleanup_iface = dp_ipa_cleanup_iface,
8933 .ipa_enable_pipes = dp_ipa_enable_pipes,
8934 .ipa_disable_pipes = dp_ipa_disable_pipes,
jiad5a4530f2019-03-25 15:33:52 +08008935 .ipa_set_perf_level = dp_ipa_set_perf_level,
8936 .ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
Leo Chang5ea93a42016-11-03 12:39:49 -07008937};
Yun Parkfde6b9e2017-06-26 17:13:11 -07008938#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07008939
Dustin Brown9ae22322019-01-25 09:51:47 -08008940static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
8941{
8942 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
8943 struct dp_soc *soc = pdev->soc;
8944 int timeout = SUSPEND_DRAIN_WAIT;
8945 int drain_wait_delay = 50; /* 50 ms */
8946
8947 /* Abort if there are any pending TX packets */
8948 while (dp_get_tx_pending(opaque_pdev) > 0) {
8949 qdf_sleep(drain_wait_delay);
8950 if (timeout <= 0) {
8951 dp_err("TX frames are pending, abort suspend");
8952 return QDF_STATUS_E_TIMEOUT;
8953 }
8954 timeout = timeout - drain_wait_delay;
8955 }
8956
8957 if (soc->intr_mode == DP_INTR_POLL)
8958 qdf_timer_stop(&soc->int_timer);
8959
8960 return QDF_STATUS_SUCCESS;
8961}
8962
8963static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
8964{
8965 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
8966 struct dp_soc *soc = pdev->soc;
8967
8968 if (soc->intr_mode == DP_INTR_POLL)
8969 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
8970
8971 return QDF_STATUS_SUCCESS;
8972}
8973
Leo Chang5ea93a42016-11-03 12:39:49 -07008974static struct cdp_bus_ops dp_ops_bus = {
Dustin Brown4a3b96b2017-05-10 15:49:38 -07008975 .bus_suspend = dp_bus_suspend,
8976 .bus_resume = dp_bus_resume
Leo Chang5ea93a42016-11-03 12:39:49 -07008977};
8978
/* OCB ops - intentionally empty for WIFI 3.0 DP */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
8982
8983
/* Throttle ops - intentionally empty for WIFI 3.0 DP */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
8987
/* Mobile-stats ops - intentionally empty for WIFI 3.0 DP */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
8991
/* Config ops - intentionally empty for WIFI 3.0 DP */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
8995
Mohit Khannaadfe9082017-11-17 13:11:17 -08008996/*
Sravan Kumar Kairam31ee37a2018-08-14 11:38:19 +05308997 * dp_peer_get_ref_find_by_addr - get peer with addr by ref count inc
Mohit Khannaadfe9082017-11-17 13:11:17 -08008998 * @dev: physical device instance
8999 * @peer_mac_addr: peer mac address
9000 * @local_id: local id for the peer
9001 * @debug_id: to track enum peer access
Sravan Kumar Kairam31ee37a2018-08-14 11:38:19 +05309002 *
Mohit Khannaadfe9082017-11-17 13:11:17 -08009003 * Return: peer instance pointer
9004 */
9005static inline void *
Krunal Sonibe43d552018-10-03 11:20:20 -07009006dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
9007 uint8_t *local_id,
9008 enum peer_debug_id_type debug_id)
Mohit Khannaadfe9082017-11-17 13:11:17 -08009009{
Sravan Kumar Kairam31ee37a2018-08-14 11:38:19 +05309010 struct dp_pdev *pdev = (struct dp_pdev *)dev;
9011 struct dp_peer *peer;
9012
9013 peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
9014
9015 if (!peer)
9016 return NULL;
9017
9018 *local_id = peer->local_id;
9019 DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
9020
9021 return peer;
9022}
9023
9024/*
9025 * dp_peer_release_ref - release peer ref count
9026 * @peer: peer handle
9027 * @debug_id: to track enum peer access
9028 *
9029 * Return: None
9030 */
9031static inline
9032void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
9033{
9034 dp_peer_unref_delete(peer);
Mohit Khannaadfe9082017-11-17 13:11:17 -08009035}
9036
Leo Chang5ea93a42016-11-03 12:39:49 -07009037static struct cdp_peer_ops dp_ops_peer = {
9038 .register_peer = dp_register_peer,
9039 .clear_peer = dp_clear_peer,
9040 .find_peer_by_addr = dp_find_peer_by_addr,
9041 .find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
Sravan Kumar Kairam31ee37a2018-08-14 11:38:19 +05309042 .peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
9043 .peer_release_ref = dp_peer_release_ref,
Leo Chang5ea93a42016-11-03 12:39:49 -07009044 .local_peer_id = dp_local_peer_id,
9045 .peer_find_by_local_id = dp_peer_find_by_local_id,
9046 .peer_state_update = dp_peer_state_update,
9047 .get_vdevid = dp_get_vdevid,
Yun Parkfde6b9e2017-06-26 17:13:11 -07009048 .get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
Leo Chang5ea93a42016-11-03 12:39:49 -07009049 .peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
9050 .get_vdev_for_peer = dp_get_vdev_for_peer,
9051 .get_peer_state = dp_get_peer_state,
9052};
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05309053#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07009054
/*
 * Top-level cdp ops table exported via soc->cdp_soc.ops; aggregates the
 * per-category op tables above. WIN vs non-WIN builds register different
 * subsets.
 */
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
9081
9082/*
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05309083 * dp_soc_set_txrx_ring_map()
9084 * @dp_soc: DP handler for soc
9085 *
9086 * Return: Void
9087 */
nobeljdebe2b32019-04-23 11:18:47 -07009088void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05309089{
9090 uint32_t i;
9091 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
Aniruddha Paulc34164e2018-09-14 14:25:30 +05309092 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05309093 }
9094}
9095
Basamma Yakkanahallib85768e2019-04-27 05:24:00 +05309096#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018)
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05309097
9098#ifndef QCA_MEM_ATTACH_ON_WIFI3
9099
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -07009100/**
Leo Chang5ea93a42016-11-03 12:39:49 -07009101 * dp_soc_attach_wifi3() - Attach txrx SOC
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05309102 * @ctrl_psoc: Opaque SOC handle from control plane
9103 * @htc_handle: Opaque HTC handle
9104 * @hif_handle: Opaque HIF handle
9105 * @qdf_osdev: QDF device
9106 * @ol_ops: Offload Operations
9107 * @device_id: Device ID
Leo Chang5ea93a42016-11-03 12:39:49 -07009108 *
9109 * Return: DP SOC handle on success, NULL on failure
9110 */
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05309111void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -07009112 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9113 struct ol_if_ops *ol_ops, uint16_t device_id)
Leo Chang5ea93a42016-11-03 12:39:49 -07009114{
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05309115 struct dp_soc *dp_soc = NULL;
9116
9117 dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9118 ol_ops, device_id);
9119 if (!dp_soc)
9120 return NULL;
9121
9122 if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
9123 return NULL;
9124
9125 return (void *)dp_soc;
9126}
9127#else
9128
9129/**
9130 * dp_soc_attach_wifi3() - Attach txrx SOC
9131 * @ctrl_psoc: Opaque SOC handle from control plane
9132 * @htc_handle: Opaque HTC handle
9133 * @hif_handle: Opaque HIF handle
9134 * @qdf_osdev: QDF device
9135 * @ol_ops: Offload Operations
9136 * @device_id: Device ID
9137 *
9138 * Return: DP SOC handle on success, NULL on failure
9139 */
9140void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
9141 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9142 struct ol_if_ops *ol_ops, uint16_t device_id)
9143{
9144 struct dp_soc *dp_soc = NULL;
9145
9146 dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9147 ol_ops, device_id);
9148 return (void *)dp_soc;
9149}
9150
9151#endif
9152
9153/**
9154 * dp_soc_attach() - Attach txrx SOC
9155 * @ctrl_psoc: Opaque SOC handle from control plane
9156 * @htc_handle: Opaque HTC handle
9157 * @qdf_osdev: QDF device
9158 * @ol_ops: Offload Operations
9159 * @device_id: Device ID
9160 *
9161 * Return: DP SOC handle on success, NULL on failure
9162 */
9163static struct dp_soc *
9164dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9165 struct ol_if_ops *ol_ops, uint16_t device_id)
9166{
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07009167 int int_ctx;
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05309168 struct dp_soc *soc = NULL;
9169 struct htt_soc *htt_soc = NULL;
9170
9171 soc = qdf_mem_malloc(sizeof(*soc));
Leo Chang5ea93a42016-11-03 12:39:49 -07009172
9173 if (!soc) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05309174 dp_err("DP SOC memory allocation failed");
Leo Chang5ea93a42016-11-03 12:39:49 -07009175 goto fail0;
9176 }
9177
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07009178 int_ctx = 0;
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -07009179 soc->device_id = device_id;
Leo Chang5ea93a42016-11-03 12:39:49 -07009180 soc->cdp_soc.ops = &dp_txrx_ops;
9181 soc->cdp_soc.ol_ops = ol_ops;
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05309182 soc->ctrl_psoc = ctrl_psoc;
Leo Chang5ea93a42016-11-03 12:39:49 -07009183 soc->osdev = qdf_osdev;
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05309184 soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
9185
Vivek126db5d2018-07-25 22:05:04 +05309186 soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
Leo Chang5ea93a42016-11-03 12:39:49 -07009187 if (!soc->wlan_cfg_ctx) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05309188 dp_err("wlan_cfg_ctx failed\n");
9189 goto fail1;
Leo Chang5ea93a42016-11-03 12:39:49 -07009190 }
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05309191 htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
9192 if (!htt_soc) {
9193 dp_err("HTT attach failed");
9194 goto fail1;
9195 }
9196 soc->htt_handle = htt_soc;
9197 htt_soc->dp_soc = soc;
9198 htt_soc->htc_soc = htc_handle;
9199
9200 if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
9201 goto fail2;
9202
9203 return (void *)soc;
9204fail2:
9205 qdf_mem_free(htt_soc);
9206fail1:
9207 qdf_mem_free(soc);
9208fail0:
9209 return NULL;
9210}
9211
9212/**
9213 * dp_soc_init() - Initialize txrx SOC
9214 * @dp_soc: Opaque DP SOC handle
9215 * @htc_handle: Opaque HTC handle
9216 * @hif_handle: Opaque HIF handle
9217 *
9218 * Return: DP SOC handle on success, NULL on failure
9219 */
9220void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
9221{
9222 int target_type;
9223 struct dp_soc *soc = (struct dp_soc *)dpsoc;
9224 struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
9225
9226 htt_soc->htc_soc = htc_handle;
9227 soc->hif_handle = hif_handle;
9228
9229 soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
9230 if (!soc->hal_soc)
9231 return NULL;
9232
9233 htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
9234 soc->hal_soc, soc->osdev);
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05309235 target_type = hal_get_target_type(soc->hal_soc);
9236 switch (target_type) {
9237 case TARGET_TYPE_QCA6290:
Chaithanya Garrepalli2467ed12018-09-11 23:57:43 +05309238 wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9239 REO_DST_RING_SIZE_QCA6290);
9240 soc->ast_override_support = 1;
Chaithanya Garrepallidedc49b2019-02-20 23:21:20 +05309241 soc->da_war_enabled = false;
Chaithanya Garrepalli2467ed12018-09-11 23:57:43 +05309242 break;
Balamurugan Mahalingam96d2d412018-07-10 10:11:58 +05309243#ifdef QCA_WIFI_QCA6390
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05309244 case TARGET_TYPE_QCA6390:
9245 wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9246 REO_DST_RING_SIZE_QCA6290);
Venkateswara Swamy Bandaru6ca41122018-08-03 16:07:06 +05309247 wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
Chaithanya Garrepalli2467ed12018-09-11 23:57:43 +05309248 soc->ast_override_support = 1;
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07009249 if (con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
Krunal Soni07215e82018-11-30 14:57:10 -08009250 int int_ctx;
9251
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07009252 for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
9253 soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
9254 soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
9255 }
9256 }
9257 soc->wlan_cfg_ctx->rxdma1_enable = 0;
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05309258 break;
Chaithanya Garrepalli2467ed12018-09-11 23:57:43 +05309259#endif
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05309260 case TARGET_TYPE_QCA8074:
Venkateswara Swamy Bandaru6ca41122018-08-03 16:07:06 +05309261 wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9262 REO_DST_RING_SIZE_QCA8074);
9263 wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
Chaithanya Garrepallidedc49b2019-02-20 23:21:20 +05309264 soc->hw_nac_monitor_support = 1;
9265 soc->da_war_enabled = true;
Venkateswara Swamy Bandaru6ca41122018-08-03 16:07:06 +05309266 break;
Venkateswara Swamy Bandaru29757ad2018-08-07 13:06:55 +05309267 case TARGET_TYPE_QCA8074V2:
Basamma Yakkanahalli5f7cfd42018-11-02 15:52:37 +05309268 case TARGET_TYPE_QCA6018:
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05309269 wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9270 REO_DST_RING_SIZE_QCA8074);
Venkateswara Swamy Bandaru6ca41122018-08-03 16:07:06 +05309271 wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
sumedh baikady59a2d332018-05-22 01:50:38 -07009272 soc->hw_nac_monitor_support = 1;
Chaithanya Garrepalli2467ed12018-09-11 23:57:43 +05309273 soc->ast_override_support = 1;
sumedh baikady61cbe852018-10-09 11:04:34 -07009274 soc->per_tid_basize_max_tid = 8;
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05309275 soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
Chaithanya Garrepallidedc49b2019-02-20 23:21:20 +05309276 soc->da_war_enabled = false;
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05309277 break;
9278 default:
9279 qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
9280 qdf_assert_always(0);
9281 break;
9282 }
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +05309283
Vivek126db5d2018-07-25 22:05:04 +05309284 wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05309285 cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
Ruchi, Agrawal34721392017-11-13 18:02:09 +05309286 soc->cce_disable = false;
Tallapragada Kalyan4c183b82017-09-13 23:48:14 +05309287
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +05309288 if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05309289 int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +05309290 CDP_CFG_MAX_PEER_ID);
9291
9292 if (ret != -EINVAL) {
9293 wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
9294 }
Ruchi, Agrawal34721392017-11-13 18:02:09 +05309295
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05309296 ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
Ruchi, Agrawal34721392017-11-13 18:02:09 +05309297 CDP_CFG_CCE_DISABLE);
Ruchi, Agrawalf279a4a2018-02-26 18:12:44 +05309298 if (ret == 1)
Ruchi, Agrawal34721392017-11-13 18:02:09 +05309299 soc->cce_disable = true;
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +05309300 }
9301
Leo Chang5ea93a42016-11-03 12:39:49 -07009302 qdf_spinlock_create(&soc->peer_ref_mutex);
Pamidipati, Vijay8a4c03a2018-12-08 12:52:38 +05309303 qdf_spinlock_create(&soc->ast_lock);
Leo Chang5ea93a42016-11-03 12:39:49 -07009304
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08009305 qdf_spinlock_create(&soc->reo_desc_freelist_lock);
9306 qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
9307
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05309308 /* fill the tx/rx cpu ring map*/
9309 dp_soc_set_txrx_ring_map(soc);
Om Prakash Tripathi12126822017-08-03 10:21:24 +05309310
9311 qdf_spinlock_create(&soc->htt_stats.lock);
9312 /* initialize work queue for stats processing */
9313 qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
9314
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05309315 return soc;
Leo Chang5ea93a42016-11-03 12:39:49 -07009316
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07009317}
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05309318
9319/**
9320 * dp_soc_init_wifi3() - Initialize txrx SOC
9321 * @dp_soc: Opaque DP SOC handle
9322 * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
9323 * @hif_handle: Opaque HIF handle
9324 * @htc_handle: Opaque HTC handle
9325 * @qdf_osdev: QDF device (Unused)
9326 * @ol_ops: Offload Operations (Unused)
9327 * @device_id: Device ID (Unused)
9328 *
9329 * Return: DP SOC handle on success, NULL on failure
9330 */
9331void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
9332 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9333 struct ol_if_ops *ol_ops, uint16_t device_id)
9334{
9335 return dp_soc_init(dpsoc, htc_handle, hif_handle);
9336}
9337
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -07009338#endif
Keyur Parekhfad6d082017-05-07 08:54:47 -07009339
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08009340/*
9341 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
9342 *
9343 * @soc: handle to DP soc
9344 * @mac_id: MAC id
9345 *
9346 * Return: Return pdev corresponding to MAC
9347 */
9348void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
9349{
9350 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
9351 return soc->pdev_list[mac_id];
9352
9353 /* Typically for MCL as there only 1 PDEV*/
9354 return soc->pdev_list[0];
9355}
9356
9357/*
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009358 * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
9359 * @soc: DP SoC context
9360 * @max_mac_rings: No of MAC rings
9361 *
9362 * Return: None
9363 */
9364static
9365void dp_is_hw_dbs_enable(struct dp_soc *soc,
9366 int *max_mac_rings)
9367{
9368 bool dbs_enable = false;
9369 if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
9370 dbs_enable = soc->cdp_soc.ol_ops->
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05309371 is_hw_dbs_2x2_capable(soc->ctrl_psoc);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009372
9373 *max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
9374}
9375
Keyur Parekhfad6d082017-05-07 08:54:47 -07009376/*
phadimana1f79822019-02-15 15:02:37 +05309377* dp_is_soc_reinit() - Check if soc reinit is true
9378* @soc: DP SoC context
9379*
9380* Return: true or false
9381*/
9382bool dp_is_soc_reinit(struct dp_soc *soc)
9383{
9384 return soc->dp_soc_reinit;
9385}
9386
9387/*
Keyur Parekhfad6d082017-05-07 08:54:47 -07009388* dp_set_pktlog_wifi3() - attach txrx vdev
9389* @pdev: Datapath PDEV handle
9390* @event: which event's notifications are being subscribed to
9391* @enable: WDI event subscribe or not. (True or False)
9392*
9393* Return: Success, NULL on failure
9394*/
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009395#ifdef WDI_EVENT_ENABLE
Keyur Parekhfad6d082017-05-07 08:54:47 -07009396int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
Mainak Sen2e43fb22019-02-21 14:03:24 +05309397 bool enable)
Keyur Parekhfad6d082017-05-07 08:54:47 -07009398{
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05309399 struct dp_soc *soc = NULL;
Keyur Parekhfad6d082017-05-07 08:54:47 -07009400 struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009401 int max_mac_rings = wlan_cfg_get_num_mac_rings
9402 (pdev->wlan_cfg_ctx);
9403 uint8_t mac_id = 0;
9404
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05309405 soc = pdev->soc;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009406 dp_is_hw_dbs_enable(soc, &max_mac_rings);
9407
9408 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Aditya Sathishded018e2018-07-02 16:25:21 +05309409 FL("Max_mac_rings %d "),
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009410 max_mac_rings);
Keyur Parekhfad6d082017-05-07 08:54:47 -07009411
9412 if (enable) {
9413 switch (event) {
9414 case WDI_EVENT_RX_DESC:
9415 if (pdev->monitor_vdev) {
9416 /* Nothing needs to be done if monitor mode is
9417 * enabled
9418 */
9419 return 0;
9420 }
9421 if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
9422 pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
9423 htt_tlv_filter.mpdu_start = 1;
9424 htt_tlv_filter.msdu_start = 1;
9425 htt_tlv_filter.msdu_end = 1;
9426 htt_tlv_filter.mpdu_end = 1;
9427 htt_tlv_filter.packet_header = 1;
9428 htt_tlv_filter.attention = 1;
9429 htt_tlv_filter.ppdu_start = 1;
9430 htt_tlv_filter.ppdu_end = 1;
9431 htt_tlv_filter.ppdu_end_user_stats = 1;
9432 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
9433 htt_tlv_filter.ppdu_end_status_done = 1;
9434 htt_tlv_filter.enable_fp = 1;
nobeljd124b742017-10-16 11:59:12 -07009435 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
9436 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
9437 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
9438 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
9439 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
9440 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Kiran Venkatappa07921612019-03-02 23:14:12 +05309441 htt_tlv_filter.offset_valid = false;
Keyur Parekhfad6d082017-05-07 08:54:47 -07009442
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009443 for (mac_id = 0; mac_id < max_mac_rings;
9444 mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08009445 int mac_for_pdev =
9446 dp_get_mac_id_for_pdev(mac_id,
9447 pdev->pdev_id);
9448
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009449 htt_h2t_rx_ring_cfg(soc->htt_handle,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08009450 mac_for_pdev,
9451 pdev->rxdma_mon_status_ring[mac_id]
9452 .hal_srng,
9453 RXDMA_MONITOR_STATUS,
9454 RX_BUFFER_SIZE,
9455 &htt_tlv_filter);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009456
9457 }
9458
9459 if (soc->reap_timer_init)
9460 qdf_timer_mod(&soc->mon_reap_timer,
9461 DP_INTR_POLL_TIMER_MS);
Keyur Parekhfad6d082017-05-07 08:54:47 -07009462 }
9463 break;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009464
Keyur Parekhfad6d082017-05-07 08:54:47 -07009465 case WDI_EVENT_LITE_RX:
9466 if (pdev->monitor_vdev) {
9467 /* Nothing needs to be done if monitor mode is
9468 * enabled
9469 */
9470 return 0;
9471 }
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009472
Keyur Parekhfad6d082017-05-07 08:54:47 -07009473 if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
9474 pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009475
Keyur Parekhfad6d082017-05-07 08:54:47 -07009476 htt_tlv_filter.ppdu_start = 1;
9477 htt_tlv_filter.ppdu_end = 1;
9478 htt_tlv_filter.ppdu_end_user_stats = 1;
9479 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
9480 htt_tlv_filter.ppdu_end_status_done = 1;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009481 htt_tlv_filter.mpdu_start = 1;
Keyur Parekhfad6d082017-05-07 08:54:47 -07009482 htt_tlv_filter.enable_fp = 1;
nobeljd124b742017-10-16 11:59:12 -07009483 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
9484 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
9485 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
9486 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
9487 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
9488 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Kiran Venkatappa07921612019-03-02 23:14:12 +05309489 htt_tlv_filter.offset_valid = false;
Keyur Parekhfad6d082017-05-07 08:54:47 -07009490
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009491 for (mac_id = 0; mac_id < max_mac_rings;
9492 mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08009493 int mac_for_pdev =
9494 dp_get_mac_id_for_pdev(mac_id,
9495 pdev->pdev_id);
9496
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009497 htt_h2t_rx_ring_cfg(soc->htt_handle,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08009498 mac_for_pdev,
9499 pdev->rxdma_mon_status_ring[mac_id]
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009500 .hal_srng,
Keyur Parekhfad6d082017-05-07 08:54:47 -07009501 RXDMA_MONITOR_STATUS,
9502 RX_BUFFER_SIZE_PKTLOG_LITE,
9503 &htt_tlv_filter);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009504 }
9505
9506 if (soc->reap_timer_init)
9507 qdf_timer_mod(&soc->mon_reap_timer,
9508 DP_INTR_POLL_TIMER_MS);
Keyur Parekhfad6d082017-05-07 08:54:47 -07009509 }
9510 break;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009511
Keyur Parekhdb0fa142017-07-13 19:40:22 -07009512 case WDI_EVENT_LITE_T2H:
9513 if (pdev->monitor_vdev) {
9514 /* Nothing needs to be done if monitor mode is
9515 * enabled
9516 */
9517 return 0;
9518 }
Venkata Sharath Chandra Manchala0ad4fda2018-03-01 13:45:46 -08009519
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009520 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08009521 int mac_for_pdev = dp_get_mac_id_for_pdev(
9522 mac_id, pdev->pdev_id);
9523
Soumya Bhat0d6245c2018-02-08 21:02:57 +05309524 pdev->pktlog_ppdu_stats = true;
Venkata Sharath Chandra Manchala0ad4fda2018-03-01 13:45:46 -08009525 dp_h2t_cfg_stats_msg_send(pdev,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08009526 DP_PPDU_TXLITE_STATS_BITMASK_CFG,
9527 mac_for_pdev);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009528 }
Keyur Parekhdb0fa142017-07-13 19:40:22 -07009529 break;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009530
Keyur Parekhfad6d082017-05-07 08:54:47 -07009531 default:
9532 /* Nothing needs to be done for other pktlog types */
9533 break;
9534 }
9535 } else {
9536 switch (event) {
9537 case WDI_EVENT_RX_DESC:
9538 case WDI_EVENT_LITE_RX:
9539 if (pdev->monitor_vdev) {
9540 /* Nothing needs to be done if monitor mode is
9541 * enabled
9542 */
9543 return 0;
9544 }
9545 if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
9546 pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009547
9548 for (mac_id = 0; mac_id < max_mac_rings;
9549 mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08009550 int mac_for_pdev =
9551 dp_get_mac_id_for_pdev(mac_id,
9552 pdev->pdev_id);
9553
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009554 htt_h2t_rx_ring_cfg(soc->htt_handle,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08009555 mac_for_pdev,
9556 pdev->rxdma_mon_status_ring[mac_id]
9557 .hal_srng,
9558 RXDMA_MONITOR_STATUS,
9559 RX_BUFFER_SIZE,
9560 &htt_tlv_filter);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009561 }
9562
9563 if (soc->reap_timer_init)
9564 qdf_timer_stop(&soc->mon_reap_timer);
Keyur Parekhfad6d082017-05-07 08:54:47 -07009565 }
9566 break;
Keyur Parekhdb0fa142017-07-13 19:40:22 -07009567 case WDI_EVENT_LITE_T2H:
9568 if (pdev->monitor_vdev) {
9569 /* Nothing needs to be done if monitor mode is
9570 * enabled
9571 */
9572 return 0;
9573 }
9574 /* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
9575 * passing value 0. Once these macros will define in htt
9576 * header file will use proper macros
9577 */
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009578 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08009579 int mac_for_pdev =
9580 dp_get_mac_id_for_pdev(mac_id,
9581 pdev->pdev_id);
9582
Soumya Bhat0d6245c2018-02-08 21:02:57 +05309583 pdev->pktlog_ppdu_stats = false;
9584 if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
9585 dp_h2t_cfg_stats_msg_send(pdev, 0,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08009586 mac_for_pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05309587 } else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
9588 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08009589 mac_for_pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05309590 } else if (pdev->enhanced_stats_en) {
9591 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08009592 mac_for_pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05309593 }
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07009594 }
9595
Keyur Parekhdb0fa142017-07-13 19:40:22 -07009596 break;
Keyur Parekhfad6d082017-05-07 08:54:47 -07009597 default:
9598 /* Nothing needs to be done for other pktlog types */
9599 break;
9600 }
9601 }
9602 return 0;
9603}
9604#endif
Varsha Mishraa331e6e2019-03-11 12:16:14 +05309605
9606/**
9607 * dp_bucket_index() - Return index from array
9608 *
9609 * @delay: delay measured
9610 * @array: array used to index corresponding delay
9611 *
9612 * Return: index
9613 */
9614static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
9615{
Varsha Mishra3e9d6472019-03-14 17:56:58 +05309616 uint8_t i = CDP_DELAY_BUCKET_0;
Varsha Mishraa331e6e2019-03-11 12:16:14 +05309617
9618 for (; i < CDP_DELAY_BUCKET_MAX; i++) {
Varsha Mishra3e9d6472019-03-14 17:56:58 +05309619 if (delay >= array[i] && delay <= array[i + 1])
Varsha Mishraa331e6e2019-03-11 12:16:14 +05309620 return i;
9621 }
9622
9623 return (CDP_DELAY_BUCKET_MAX - 1);
9624}
9625
9626/**
9627 * dp_fill_delay_buckets() - Fill delay statistics bucket for each
9628 * type of delay
9629 *
9630 * @pdev: pdev handle
9631 * @delay: delay in ms
9632 * @t: tid value
9633 * @mode: type of tx delay mode
9634 * Return: pointer to cdp_delay_stats structure
9635 */
9636static struct cdp_delay_stats *
9637dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
9638 uint8_t tid, uint8_t mode)
9639{
9640 uint8_t delay_index = 0;
9641 struct cdp_tid_tx_stats *tstats =
9642 &pdev->stats.tid_stats.tid_tx_stats[tid];
9643 struct cdp_tid_rx_stats *rstats =
9644 &pdev->stats.tid_stats.tid_rx_stats[tid];
9645 /*
9646 * cdp_fw_to_hw_delay_range
9647 * Fw to hw delay ranges in milliseconds
9648 */
9649 uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
Varsha Mishra3e9d6472019-03-14 17:56:58 +05309650 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
Varsha Mishraa331e6e2019-03-11 12:16:14 +05309651
9652 /*
9653 * cdp_sw_enq_delay_range
9654 * Software enqueue delay ranges in milliseconds
9655 */
9656 uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
Varsha Mishra3e9d6472019-03-14 17:56:58 +05309657 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
Varsha Mishraa331e6e2019-03-11 12:16:14 +05309658
9659 /*
9660 * cdp_intfrm_delay_range
9661 * Interframe delay ranges in milliseconds
9662 */
9663 uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
Varsha Mishra3e9d6472019-03-14 17:56:58 +05309664 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
Varsha Mishraa331e6e2019-03-11 12:16:14 +05309665
9666 /*
9667 * Update delay stats in proper bucket
9668 */
9669 switch (mode) {
9670 /* Software Enqueue delay ranges */
9671 case CDP_DELAY_STATS_SW_ENQ:
9672
9673 delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
9674 tstats->swq_delay.delay_bucket[delay_index]++;
9675 return &tstats->swq_delay;
9676
9677 /* Tx Completion delay ranges */
9678 case CDP_DELAY_STATS_FW_HW_TRANSMIT:
9679
9680 delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
9681 tstats->hwtx_delay.delay_bucket[delay_index]++;
9682 return &tstats->hwtx_delay;
9683
9684 /* Interframe tx delay ranges */
9685 case CDP_DELAY_STATS_TX_INTERFRAME:
9686
9687 delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
9688 tstats->intfrm_delay.delay_bucket[delay_index]++;
9689 return &tstats->intfrm_delay;
9690
9691 /* Interframe rx delay ranges */
9692 case CDP_DELAY_STATS_RX_INTERFRAME:
9693
9694 delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
9695 rstats->intfrm_delay.delay_bucket[delay_index]++;
9696 return &rstats->intfrm_delay;
9697
9698 /* Ring reap to indication to network stack */
9699 case CDP_DELAY_STATS_REAP_STACK:
9700
9701 delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
9702 rstats->to_stack_delay.delay_bucket[delay_index]++;
Varsha Mishra3e9d6472019-03-14 17:56:58 +05309703 return &rstats->to_stack_delay;
Varsha Mishraa331e6e2019-03-11 12:16:14 +05309704 default:
9705 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
9706 "%s Incorrect delay mode: %d", __func__, mode);
9707 }
9708
9709 return NULL;
9710}
9711
9712/**
9713 * dp_update_delay_stats() - Update delay statistics in structure
9714 * and fill min, max and avg delay
9715 *
9716 * @pdev: pdev handle
9717 * @delay: delay in ms
9718 * @tid: tid value
9719 * @mode: type of tx delay mode
9720 * Return: none
9721 */
9722void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
9723 uint8_t tid, uint8_t mode)
9724{
9725 struct cdp_delay_stats *dstats = NULL;
9726
9727 /*
9728 * Delay ranges are different for different delay modes
9729 * Get the correct index to update delay bucket
9730 */
9731 dstats = dp_fill_delay_buckets(pdev, delay, tid, mode);
9732 if (qdf_unlikely(!dstats))
9733 return;
9734
9735 if (delay != 0) {
9736 /*
9737 * Compute minimum,average and maximum
9738 * delay
9739 */
9740 if (delay < dstats->min_delay)
9741 dstats->min_delay = delay;
9742
9743 if (delay > dstats->max_delay)
9744 dstats->max_delay = delay;
9745
9746 /*
9747 * Average over delay measured till now
9748 */
9749 if (!dstats->avg_delay)
9750 dstats->avg_delay = delay;
9751 else
9752 dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
9753 }
9754}