blob: 8aedeaa4a9c2d413439b166a19800acb8a7bceb4 [file] [log] [blame]
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001/*
Vivek Natarajan95f004f2019-01-10 22:15:46 +05302 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <qdf_types.h>
20#include <qdf_lock.h>
Dhanashri Atre14049172016-11-11 18:32:36 -080021#include <qdf_net_types.h>
Dhanashri Atre0da31222017-03-23 12:30:58 -070022#include <qdf_lro.h>
Tallapragada Kalyan4c183b82017-09-13 23:48:14 +053023#include <qdf_module.h>
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +053024#include <hal_hw_headers.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070025#include <hal_api.h>
26#include <hif.h>
27#include <htt.h>
28#include <wdi_event.h>
29#include <queue.h>
30#include "dp_htt.h"
31#include "dp_types.h"
32#include "dp_internal.h"
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +053033#include "dp_tx.h"
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070034#include "dp_tx_desc.h"
Leo Chang5ea93a42016-11-03 12:39:49 -070035#include "dp_rx.h"
Kai Chen52ef33f2019-03-05 18:33:40 -080036#include "dp_rx_mon.h"
Surya Prakash Raajen3a01bdd2019-02-19 13:19:36 +053037#ifdef DP_RATETABLE_SUPPORT
38#include "dp_ratetable.h"
39#endif
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080040#include <cdp_txrx_handle.h>
Ravi Joshiaf9ace82017-02-17 12:41:48 -080041#include <wlan_cfg.h>
Ishank Jainbc2d91f2017-01-03 18:14:54 +053042#include "cdp_txrx_cmn_struct.h"
Prathyusha Guduri184b6402018-02-04 23:01:49 +053043#include "cdp_txrx_stats_struct.h"
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -070044#include "cdp_txrx_cmn_reg.h"
Dhanashri Atre14049172016-11-11 18:32:36 -080045#include <qdf_util.h>
Ishank Jain1e7401c2017-02-17 15:38:39 +053046#include "dp_peer.h"
Kai Chen6eca1a62017-01-12 10:17:53 -080047#include "dp_rx_mon.h"
Ishank Jain6290a3c2017-03-21 10:49:39 +053048#include "htt_stats.h"
Keyur Parekhfad6d082017-05-07 08:54:47 -070049#include "qdf_mem.h" /* qdf_mem_malloc,free */
Vivek126db5d2018-07-25 22:05:04 +053050#include "cfg_ucfg_api.h"
Manjunathappa Prakash5f050a82017-07-18 22:00:05 -070051#ifdef QCA_LL_TX_FLOW_CONTROL_V2
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070052#include "cdp_txrx_flow_ctrl_v2.h"
Manjunathappa Prakash5f050a82017-07-18 22:00:05 -070053#else
/*
 * No-op stub used when QCA_LL_TX_FLOW_CONTROL_V2 is compiled out,
 * so callers can invoke the dump API unconditionally.
 */
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
59#endif
Yun Parkfde6b9e2017-06-26 17:13:11 -070060#include "dp_ipa.h"
Ruchi, Agrawal234753c2018-06-28 14:53:37 +053061#include "dp_cal_client_api.h"
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070062#ifdef CONFIG_MCL
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -070063extern int con_mode_monitor;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070064#ifndef REMOVE_PKT_LOG
65#include <pktlog_ac_api.h>
66#include <pktlog_ac.h>
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070067#endif
68#endif
Kai Chen52ef33f2019-03-05 18:33:40 -080069
70#ifdef WLAN_RX_PKT_CAPTURE_ENH
71#include "dp_rx_mon_feature.h"
72#else
73/*
74 * dp_config_enh_rx_capture()- API to enable/disable enhanced rx capture
75 * @pdev_handle: DP_PDEV handle
76 * @val: user provided value
77 *
78 * Return: QDF_STATUS
79 */
80static QDF_STATUS
81dp_config_enh_rx_capture(struct cdp_pdev *pdev_handle, int val)
82{
83 return QDF_STATUS_E_INVAL;
84}
85#endif
86
Anish Nataraje9d4c3b2018-11-24 22:24:56 +053087void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
88static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
89static struct dp_soc *
90dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
91 struct ol_if_ops *ol_ops, uint16_t device_id);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070092static void dp_pktlogmod_exit(struct dp_pdev *handle);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +053093static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
Akshay Kosigi78eced82018-05-14 14:53:48 +053094 uint8_t *peer_mac_addr,
95 struct cdp_ctrl_objmgr_peer *ctrl_peer);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +053096static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +053097static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
98static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
Krunal Soni03ba0f52019-02-12 11:44:46 -080099#ifdef ENABLE_VERBOSE_DEBUG
100bool is_dp_verbose_debug_enabled;
101#endif
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -0700102
Karunakar Dasineni1d891ed2017-03-29 15:42:02 -0700103#define DP_INTR_POLL_TIMER_MS 10
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +0530104/* Generic AST entry aging timer value */
105#define DP_AST_AGING_TIMER_DEFAULT_MS 1000
Venkata Sharath Chandra Manchala69a0ed32018-12-12 14:22:11 -0800106
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +0530107/* WDS AST entry aging timer value */
108#define DP_WDS_AST_AGING_TIMER_DEFAULT_MS 120000
109#define DP_WDS_AST_AGING_TIMER_CNT \
110((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
Ishank Jainbc2d91f2017-01-03 18:14:54 +0530111#define DP_MCS_LENGTH (6*MAX_MCS)
112#define DP_NSS_LENGTH (6*SS_COUNT)
nobelj4e9d51f2018-08-07 19:36:47 -0700113#define DP_MU_GROUP_SHOW 16
114#define DP_MU_GROUP_LENGTH (6 * DP_MU_GROUP_SHOW)
Venkata Sharath Chandra Manchala69a0ed32018-12-12 14:22:11 -0800115
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530116#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
Venkata Sharath Chandra Manchalaf167af12018-10-09 20:23:02 -0700117#define DP_MAX_INT_CONTEXTS_STRING_LENGTH (6 * WLAN_CFG_INT_NUM_CONTEXTS)
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530118#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
119#define DP_MAX_MCS_STRING_LEN 30
Ishank Jain6290a3c2017-03-21 10:49:39 +0530120#define DP_CURR_FW_STATS_AVAIL 19
121#define DP_HTT_DBG_EXT_STATS_MAX 256
Prathyusha Guduri43bb0562018-02-12 18:30:54 +0530122#define DP_MAX_SLEEP_TIME 100
Krunal Sonid9dea642018-12-18 00:25:03 -0800123#ifndef QCA_WIFI_3_0_EMU
124#define SUSPEND_DRAIN_WAIT 500
125#else
126#define SUSPEND_DRAIN_WAIT 3000
127#endif
Ishank Jain949674c2017-02-27 17:09:29 +0530128
Yun Parkfde6b9e2017-06-26 17:13:11 -0700129#ifdef IPA_OFFLOAD
130/* Exclude IPA rings from the interrupt context */
Yun Park601d0d82017-08-28 21:49:31 -0700131#define TX_RING_MASK_VAL 0xb
Yun Parkfde6b9e2017-06-26 17:13:11 -0700132#define RX_RING_MASK_VAL 0x7
133#else
134#define TX_RING_MASK_VAL 0xF
135#define RX_RING_MASK_VAL 0xF
136#endif
Venkateswara Swamy Bandarued15e74a2017-08-18 19:13:10 +0530137
sumedh baikady72b1c712017-08-24 12:11:46 -0700138#define STR_MAXLEN 64
Soumya Bhat89647ef2017-11-16 17:23:48 +0530139
Soumya Bhat0d6245c2018-02-08 21:02:57 +0530140#define DP_PPDU_STATS_CFG_ALL 0xFFFF
141
142/* PPDU stats mask sent to FW to enable enhanced stats */
143#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
144/* PPDU stats mask sent to FW to support debug sniffer feature */
145#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
Vinay Adella873dc402018-05-28 12:06:34 +0530146/* PPDU stats mask sent to FW to support BPR feature*/
147#define DP_PPDU_STATS_CFG_BPR 0x2000
148/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */
149#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \
150 DP_PPDU_STATS_CFG_ENH_STATS)
151/* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */
152#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \
153 DP_PPDU_TXLITE_STATS_BITMASK_CFG)
154
Vivek126db5d2018-07-25 22:05:04 +0530155#define RNG_ERR "SRNG setup failed for"
Sravan Kumar Kairamebd627e2018-08-28 23:32:52 +0530156
157/* Threshold for peer's cached buf queue beyond which frames are dropped */
158#define DP_RX_CACHED_BUFQ_THRESH 64
159
/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * Indexed by the 6-bit DSCP codepoint. Eight consecutive codepoints
 * share one TID, i.e. only the top three DSCP bits select the TID.
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
183
/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * Identity map from the 3-bit PCP value to TID.
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
/*
 * struct dp_rate_debug - printable description of one MCS index
 *
 * @mcs_type: print string for a given mcs
 * @valid: MCS_VALID when this MCS index is meaningful for the
 *         preamble type, MCS_INVALID otherwise
 */
struct dp_rate_debug {
	char mcs_type[DP_MAX_MCS_STRING_LEN];
	uint8_t valid;
};

/* Values for the @valid field of struct dp_rate_debug */
#define MCS_VALID 1
#define MCS_INVALID 0
213
/*
 * dp_rate_string - per-preamble MCS print strings used by stats dumps.
 * First index is the preamble type (rows below: OFDM, CCK, HT, VHT, HE),
 * second index is the MCS value.
 *
 * NOTE(review): the final "INVALID" slot of each row is marked MCS_VALID,
 * which looks like a deliberate catch-all bucket - confirm before relying
 * on the @valid flag for that index.
 */
static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {

	/* OFDM (11a/g) rates */
	{
		{"OFDM 48 Mbps", MCS_VALID},
		{"OFDM 24 Mbps", MCS_VALID},
		{"OFDM 12 Mbps", MCS_VALID},
		{"OFDM 6 Mbps ", MCS_VALID},
		{"OFDM 54 Mbps", MCS_VALID},
		{"OFDM 36 Mbps", MCS_VALID},
		{"OFDM 18 Mbps", MCS_VALID},
		{"OFDM 9 Mbps ", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	/* CCK (11b) rates, long/short preamble */
	{
		{"CCK 11 Mbps Long  ", MCS_VALID},
		{"CCK 5.5 Mbps Long ", MCS_VALID},
		{"CCK 2 Mbps Long   ", MCS_VALID},
		{"CCK 1 Mbps Long   ", MCS_VALID},
		{"CCK 11 Mbps Short ", MCS_VALID},
		{"CCK 5.5 Mbps Short", MCS_VALID},
		{"CCK 2 Mbps Short  ", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	/* HT (11n) MCS 0-7 */
	{
		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	/* VHT (11ac) MCS 0-11 */
	{
		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_VALID},
	},
	/* HE (11ax) MCS 0-11 */
	{
		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_VALID},
	}
};
292
/**
 * enum dp_cpu_ring_map_types - dp tx cpu ring map
 * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
 * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
 * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
 * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
 * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
 * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
 *
 * Selects a row of dp_cpu_ring_map[] depending on which radios are
 * NSS-offloaded.
 */
enum dp_cpu_ring_map_types {
	DP_NSS_DEFAULT_MAP,
	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
	DP_NSS_DBDC_OFFLOADED_MAP,
	DP_NSS_DBTC_OFFLOADED_MAP,
	DP_NSS_CPU_RING_MAP_MAX
};
310
/**
 * @brief Cpu to tx ring map
 *
 * Row is an enum dp_cpu_ring_map_types value; column is the interrupt
 * context index (WLAN_CFG_INT_NUM_CONTEXTS differs between the WIN and
 * non-WIN builds, hence the two variants).
 */
#ifdef CONFIG_WIN
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#else
static uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}
};
#endif
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +0530333
/**
 * enum dp_stats_type - Select the type of statistics
 * @STATS_FW: firmware (HTT) statistics
 * @STATS_HOST: host-side statistics
 * @STATS_TYPE_MAX: column count of dp_stats_mapping_table
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};
342
/**
 * enum dp_fw_stats - General Firmware statistics options
 * @TXRX_FW_STATS_INVALID: placeholder for mapping-table rows that have
 *                         no firmware counterpart
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};
350
/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 *
 * Each row pairs a firmware (HTT) stats id with a host stats id;
 * TXRX_FW_STATS_INVALID / TXRX_HOST_STATS_INVALID marks the side a
 * given row does not support.
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
};
389
/* MCL specific functions */
#ifdef CONFIG_MCL
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are being processed in timer contexts (polled).
 * This function is returning 0, since in interrupt mode (softirq based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings. It would be
 * done in a separate timer context.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}
412
/*
 * dp_service_mon_rings() - timer to reap monitor rings
 * reqd as we are not getting ppdu end interrupts
 * @arg: SoC Handle
 *
 * Reaps every RXDMA monitor ring of every attached pdev with a full
 * NAPI budget, then re-arms itself.
 *
 * Return: none
 */
static void dp_service_mon_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, work_done, mac_id;
	struct dp_pdev *pdev = NULL;

	for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		/* pdev_list may be sparse - skip unattached slots */
		if (!pdev)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								  pdev->pdev_id);
			work_done = dp_mon_process(soc, mac_for_pdev,
						   QCA_NAPI_BUDGET);

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("Reaped %d descs from Monitor rings"),
				  work_done);
		}
	}

	/* Re-arm the poll timer for the next reap cycle */
	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}
445
446#ifndef REMOVE_PKT_LOG
447/**
448 * dp_pkt_log_init() - API to initialize packet log
449 * @ppdev: physical device handle
450 * @scn: HIF context
451 *
452 * Return: none
453 */
454void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
455{
456 struct dp_pdev *handle = (struct dp_pdev *)ppdev;
457
458 if (handle->pkt_log_init) {
459 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
460 "%s: Packet log not initialized", __func__);
461 return;
462 }
463
464 pktlog_sethandle(&handle->pl_dev, scn);
Venkata Sharath Chandra Manchalacad74ad2019-01-28 11:36:47 -0800465 pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
Mohit Khannadba82f22018-07-12 10:59:17 -0700466
467 if (pktlogmod_init(scn)) {
468 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
469 "%s: pktlogmod_init failed", __func__);
470 handle->pkt_log_init = false;
471 } else {
472 handle->pkt_log_init = true;
473 }
474}
475
/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Initializes packet log for this pdev, then attaches the pktlog
 * HTC endpoint.
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
	dp_pkt_log_init(ppdev, scn);
	pktlog_htc_attach();
}
490
491/**
Mohit Khanna16816ae2018-10-30 14:12:03 -0700492 * dp_get_num_rx_contexts() - get number of RX contexts
493 * @soc_hdl: cdp opaque soc handle
494 *
495 * Return: number of RX contexts
496 */
497static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
498{
499 int i;
500 int num_rx_contexts = 0;
501
502 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
503
504 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
505 if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
506 num_rx_contexts++;
507
508 return num_rx_contexts;
509}
510
/**
 * dp_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 *
 * Tears down the pktlog module for this pdev's HIF context and clears
 * the pdev's pkt_log_init flag so a later init can run again.
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *handle)
{
	void *scn = (void *)handle->soc->hif_handle;

	/* Nothing to tear down without a valid HIF context */
	if (!scn) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid hif(scn) handle", __func__);
		return;
	}

	pktlogmod_exit(scn);
	handle->pkt_log_init = false;
}
530#endif
531#else
/* Packet log is only built under CONFIG_MCL; no-op in other builds. */
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
533
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Non-MCL build: monitor rings are serviced from the interrupt context,
 * so report the configured RX monitor ring mask for this context.
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
546#endif
547
/**
 * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
 * @cdp_opaque_vdev: pointer to cdp_vdev
 *
 * The CDP layer hands out opaque vdev handles; internally they are
 * always struct dp_vdev, so this is a pure pointer reinterpretation.
 *
 * Return: pointer to dp_vdev
 */
static struct dp_vdev *
dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
{
	return (struct dp_vdev *)cdp_opaque_vdev;
}
559
560
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530561static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
562 struct cdp_peer *peer_hdl,
563 uint8_t *mac_addr,
564 enum cdp_txrx_ast_entry_type type,
565 uint32_t flags)
566{
567
568 return dp_peer_add_ast((struct dp_soc *)soc_hdl,
569 (struct dp_peer *)peer_hdl,
570 mac_addr,
571 type,
572 flags);
573}
574
/*
 * dp_peer_update_ast_wifi3() - update an existing AST entry's flags
 * @soc_hdl: Datapath SOC handle
 * @peer_hdl: peer through which the WDS MAC is reachable
 * @wds_macaddr: MAC address of the AST entry to update
 * @flags: new flags for the entry
 *
 * Looks up the entry on the peer's pdev under ast_lock and, if found,
 * applies the flags via dp_peer_update_ast().
 *
 * Return: dp_peer_update_ast() status, or -1 when no entry was found
 */
static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
				    struct cdp_peer *peer_hdl,
				    uint8_t *wds_macaddr,
				    uint32_t flags)
{
	int status = -1;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry  *ast_entry = NULL;
	struct dp_peer *peer = (struct dp_peer *)peer_hdl;

	/* ast_lock protects both the hash lookup and the update */
	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
						    peer->vdev->pdev->pdev_id);

	if (ast_entry) {
		status = dp_peer_update_ast(soc,
					    peer,
					    ast_entry, flags);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	return status;
}
599
/*
 * dp_wds_reset_ast_wifi3() - Delete host-managed (HM) WDS AST entries
 * @soc_hdl: Datapath SOC handle
 * @wds_macaddr: WDS entry MAC Address
 * @peer_mac_addr: peer MAC address (takes precedence over @wds_macaddr)
 * @vdev_handle: vdev the peer/entry belongs to
 *
 * If @peer_mac_addr is supplied, every WDS_HM/WDS_HM_SEC entry of that
 * peer is deleted. Otherwise, if @wds_macaddr is supplied, only the
 * matching entry on the vdev's pdev is deleted (when it is HM typed).
 *
 * Return: None
 */
static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
				   uint8_t *wds_macaddr,
				   uint8_t *peer_mac_addr,
				   void *vdev_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_ast_entry *tmp_ast_entry;
	struct dp_peer *peer;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;

	if (!vdev)
		return;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id);
		if (!peer)
			return;
		qdf_spin_lock_bh(&soc->ast_lock);
		/* Safe-iterate: entries may be deleted while walking */
		DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
		/* Drop the reference taken by dp_peer_find_hash_find() */
		dp_peer_unref_delete(peer);

	} else if (wds_macaddr) {
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}
}
650
/*
 * dp_wds_reset_ast_table_wifi3() - Delete all host-managed (HM) WDS AST
 *				    entries across the whole SOC
 * @soc_hdl: Datapath SOC handle
 * @vdev_hdl: vdev handle (unused - the walk covers every pdev/vdev/peer)
 *
 * Walks every pdev, vdev and peer and deletes each AST entry of type
 * WDS_HM or WDS_HM_SEC.
 *
 * Return: None
 */
static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
					 void *vdev_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		/* vdev_list_lock guards the per-pdev vdev walk */
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					if ((ase->type ==
					     CDP_TXRX_AST_TYPE_WDS_HM) ||
					    (ase->type ==
					     CDP_TXRX_AST_TYPE_WDS_HM_SEC))
						dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}
688
/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
 * @soc: Datapath SOC handle
 *
 * Walks every pdev, vdev and peer and deletes every AST entry except
 * the STATIC, SELF and STA_BSS types, which must survive a flush.
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		/* vdev_list_lock guards the per-pdev vdev walk */
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					/* Keep entries that describe the
					 * peer/vdev itself */
					if ((ase->type ==
					     CDP_TXRX_AST_TYPE_STATIC) ||
					    (ase->type ==
					     CDP_TXRX_AST_TYPE_SELF) ||
					    (ase->type ==
					     CDP_TXRX_AST_TYPE_STA_BSS))
						continue;
					dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}
728
/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
 *                                       and return ast entry information
 *                                       of first ast entry found in the
 *                                       table with given mac address
 *
 * @soc_hdl : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @ast_entry_info : out-param filled with the entry's type, ids and
 *                   the owning peer's MAC address
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
	(struct cdp_soc_t *soc_hdl,
	 uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
	/* No entry, or entry not linked to a peer - nothing to report */
	if (!ast_entry || !ast_entry->peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	/* Entry being torn down with no pending callback - treat as gone */
	if (ast_entry->delete_in_progress && !ast_entry->callback) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &ast_entry->peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}
771
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530772/**
773 * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
774 * and return ast entry information
775 * if mac address and pdev_id matches
776 *
777 * @soc : data path soc handle
778 * @ast_mac_addr : AST entry mac address
779 * @pdev_id : pdev_id
780 * @ast_entry_info : ast entry information
781 *
782 * return : true if ast entry found with ast_mac_addr
783 * false if ast entry not found
784 */
static bool dp_peer_get_ast_info_by_pdevid_wifi3
		(struct cdp_soc_t *soc_hdl,
		 uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	/* Hold ast_lock across lookup and copy so neither the AST entry nor
	 * its peer can be freed while we read them.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);

	/* Per-pdev variant of the soc-wide lookup above: match both the MAC
	 * address and the pdev the entry was created on.
	 */
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);

	/* No entry, or an entry not (yet/any longer) backed by a peer */
	if (!ast_entry || !ast_entry->peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	/* Entry is mid-delete with no owner waiting on it: treat as absent */
	if (ast_entry->delete_in_progress && !ast_entry->callback) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	/* Snapshot the entry into caller-provided storage under the lock */
	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &ast_entry->peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}
816
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530817/**
818 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
819 * with given mac address
820 *
821 * @soc : data path soc handle
822 * @ast_mac_addr : AST entry mac address
823 * @callback : callback function to called on ast delete response from FW
824 * @cookie : argument to be passed to callback
825 *
826 * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
827 * is sent
828 * QDF_STATUS_E_INVAL false if ast entry not found
829 */
830static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
831 uint8_t *mac_addr,
832 txrx_ast_free_cb callback,
833 void *cookie)
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530834
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530835{
836 struct dp_soc *soc = (struct dp_soc *)soc_handle;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530837 struct dp_ast_entry *ast_entry;
838 txrx_ast_free_cb cb = NULL;
839 void *arg = NULL;
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530840
841 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530842 ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
843 if (!ast_entry) {
844 qdf_spin_unlock_bh(&soc->ast_lock);
845 return -QDF_STATUS_E_INVAL;
846 }
847
848 if (ast_entry->callback) {
849 cb = ast_entry->callback;
850 arg = ast_entry->cookie;
851 }
852
853 ast_entry->callback = callback;
854 ast_entry->cookie = cookie;
855
856 /*
857 * if delete_in_progress is set AST delete is sent to target
858 * and host is waiting for response should not send delete
859 * again
860 */
861 if (!ast_entry->delete_in_progress)
862 dp_peer_del_ast(soc, ast_entry);
863
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530864 qdf_spin_unlock_bh(&soc->ast_lock);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530865 if (cb) {
866 cb(soc->ctrl_psoc,
867 soc,
868 arg,
869 CDP_TXRX_AST_DELETE_IN_PROGRESS);
870 }
871 return QDF_STATUS_SUCCESS;
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530872}
873
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530874/**
875 * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
876 * table if mac address and pdev_id matches
877 *
878 * @soc : data path soc handle
879 * @ast_mac_addr : AST entry mac address
880 * @pdev_id : pdev id
881 * @callback : callback function to called on ast delete response from FW
882 * @cookie : argument to be passed to callback
883 *
884 * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
885 * is sent
886 * QDF_STATUS_E_INVAL false if ast entry not found
887 */
888
889static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
890 uint8_t *mac_addr,
891 uint8_t pdev_id,
892 txrx_ast_free_cb callback,
893 void *cookie)
894
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530895{
896 struct dp_soc *soc = (struct dp_soc *)soc_handle;
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530897 struct dp_ast_entry *ast_entry;
898 txrx_ast_free_cb cb = NULL;
899 void *arg = NULL;
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530900
901 qdf_spin_lock_bh(&soc->ast_lock);
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530902 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
903
904 if (!ast_entry) {
905 qdf_spin_unlock_bh(&soc->ast_lock);
906 return -QDF_STATUS_E_INVAL;
907 }
908
909 if (ast_entry->callback) {
910 cb = ast_entry->callback;
911 arg = ast_entry->cookie;
912 }
913
914 ast_entry->callback = callback;
915 ast_entry->cookie = cookie;
916
917 /*
918 * if delete_in_progress is set AST delete is sent to target
919 * and host is waiting for response should not sent delete
920 * again
921 */
922 if (!ast_entry->delete_in_progress)
923 dp_peer_del_ast(soc, ast_entry);
924
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530925 qdf_spin_unlock_bh(&soc->ast_lock);
926
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530927 if (cb) {
928 cb(soc->ctrl_psoc,
929 soc,
930 arg,
931 CDP_TXRX_AST_DELETE_IN_PROGRESS);
932 }
933 return QDF_STATUS_SUCCESS;
Kiran Venkatappaed35f442018-07-19 22:22:29 +0530934}
935
Houston Hoffman648a9182017-05-21 23:27:50 -0700936/**
937 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
938 * @ring_num: ring num of the ring being queried
939 * @grp_mask: the grp_mask array for the ring type in question.
940 *
941 * The grp_mask array is indexed by group number and the bit fields correspond
942 * to ring numbers. We are finding which interrupt group a ring belongs to.
943 *
944 * Return: the index in the grp_mask array with the ring number.
945 * -QDF_STATUS_E_NOENT if no entry is found
946 */
947static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
948{
949 int ext_group_num;
950 int mask = 1 << ring_num;
951
952 for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
953 ext_group_num++) {
954 if (mask & grp_mask[ext_group_num])
955 return ext_group_num;
956 }
957
958 return -QDF_STATUS_E_NOENT;
959}
960
/**
 * dp_srng_calculate_msi_group() - map a (ring_type, ring_num) pair to the
 * interrupt-group mask array it is serviced by, then resolve the group index
 *
 * @soc: datapath soc handle (source of wlan_cfg_ctx masks)
 * @ring_type: HAL ring type being configured
 * @ring_num: ring number within that type (may be remapped, see WBM2SW case)
 *
 * Return: ext_group index servicing the ring, or -QDF_STATUS_E_NOENT for
 * ring types that take no DP group interrupt (SW-to-HW rings, CE rings, ...)
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num)
{
	int *grp_mask;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		/* dp_tx_comp_handler - soc->tx_comp_ring */
		if (ring_num < 3)
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
		else if (ring_num == 3) {
			/* sw treats this as a separate ring type; the rel
			 * ring is ring 0 of its own mask, so remap ring_num
			 * before the final mask lookup below
			 */
			grp_mask = &soc->wlan_cfg_ctx->
				int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else {
			qdf_assert(0);
			return -QDF_STATUS_E_NOENT;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process; both monitor ring types intentionally
		 * fall through to share one mask
		 */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		/* TODO: support low_thresh interrupt */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_DATA:
	case TCL_CMD:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
1047
/**
 * dp_srng_msi_setup() - fill in the MSI address/data fields of the SRNG
 * parameters for a ring, based on the platform's MSI assignment for "DP"
 *
 * @soc: datapath soc handle
 * @ring_params: SRNG parameter block being populated (msi_addr/msi_data/flags)
 * @ring_type: HAL ring type
 * @ring_num: ring number within the type
 *
 * Silently leaves the params untouched when the platform reports no MSI
 * assignment; zeroes the MSI fields when the ring belongs to no ext_group.
 */
static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int msi_group_number;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* No MSI assignment for DP: caller falls back to non-MSI interrupts */
	if (ret)
		return;

	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
						       ring_num);
	if (msi_group_number < 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
			  FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
			  ring_type, ring_num);
		/* Not an error: this ring simply takes no MSI */
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	/* NOTE(review): the modulo below means group == msi_data_count
	 * already shares a vector with group 0, so this check arguably
	 * should be >= rather than > — confirm against the MSI allocation
	 * scheme before changing.
	 */
	if (msi_group_number > msi_data_count) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			  FL("2 msi_groups will share an msi; msi_group_num %d"),
			  msi_group_number);

		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	/* 64-bit MSI target address split across two 32-bit halves */
	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
1090
1091/**
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05301092 * dp_print_ast_stats() - Dump AST table contents
1093 * @soc: Datapath soc handle
1094 *
1095 * return void
1096 */
#ifdef FEATURE_AST
void dp_print_ast_stats(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_entries = 0;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *tmp_ase;
	/* Human-readable names indexed by ase->type; must stay in sync with
	 * the CDP_TXRX_AST_TYPE_* enum ordering.
	 */
	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
			"DA", "HMWDS_SEC"};

	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
	DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("AST Table:");

	/* Lock order: soc->ast_lock outer, pdev->vdev_list_lock inner —
	 * the same order used elsewhere in this file.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		/* Dump every AST entry of every peer of every vdev */
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
					DP_PRINT_STATS("%6d mac_addr = %pM"
						       " peer_mac_addr = %pM"
						       " peer_id = %u"
						       " type = %s"
						       " next_hop = %d"
						       " is_active = %d"
						       " is_bss = %d"
						       " ast_idx = %d"
						       " ast_hash = %d"
						       " delete_in_progress = %d"
						       " pdev_id = %d"
						       " vdev_id = %d",
						       ++num_entries,
						       ase->mac_addr.raw,
						       ase->peer->mac_addr.raw,
						       ase->peer->peer_ids[0],
						       type[ase->type],
						       ase->next_hop,
						       ase->is_active,
						       ase->is_bss,
						       ase->ast_idx,
						       ase->ast_hash_value,
						       ase->delete_in_progress,
						       ase->pdev_id,
						       ase->vdev_id);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
/* Stub when AST support is compiled out */
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
	return;
}
#endif
1162
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05301163/**
1164 * dp_print_peer_table() - Dump all Peer stats
1165 * @vdev: Datapath Vdev handle
1166 *
1167 * return void
1168 */
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05301169static void dp_print_peer_table(struct dp_vdev *vdev)
1170{
1171 struct dp_peer *peer = NULL;
1172
1173 DP_PRINT_STATS("Dumping Peer Table Stats:");
1174 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1175 if (!peer) {
1176 DP_PRINT_STATS("Invalid Peer");
1177 return;
1178 }
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301179 DP_PRINT_STATS(" peer_mac_addr = %pM"
1180 " nawds_enabled = %d"
1181 " bss_peer = %d"
1182 " wapi = %d"
1183 " wds_enabled = %d"
1184 " delete in progress = %d"
1185 " peer id = %d",
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05301186 peer->mac_addr.raw,
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301187 peer->nawds_enabled,
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05301188 peer->bss_peer,
1189 peer->wapi,
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05301190 peer->wds_enabled,
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05301191 peer->delete_in_progress,
1192 peer->peer_ids[0]);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05301193 }
1194}
1195
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05301196/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001197 * dp_setup_srng - Internal function to setup SRNG rings used by data path
1198 */
1199static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
Dhanashri Atred4032ab2017-01-17 15:05:41 -08001200 int ring_type, int ring_num, int mac_id, uint32_t num_entries)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001201{
1202 void *hal_soc = soc->hal_soc;
1203 uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
1204 /* TODO: See if we should get align size from hal */
1205 uint32_t ring_base_align = 8;
1206 struct hal_srng_params ring_params;
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -08001207 uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001208
Houston Hoffman648a9182017-05-21 23:27:50 -07001209 /* TODO: Currently hal layer takes care of endianness related settings.
1210 * See if these settings need to passed from DP layer
1211 */
1212 ring_params.flags = 0;
1213
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -08001214 num_entries = (num_entries > max_entries) ? max_entries : num_entries;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001215 srng->hal_srng = NULL;
1216 srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001217 srng->num_entries = num_entries;
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05301218
phadimana1f79822019-02-15 15:02:37 +05301219 if (!dp_is_soc_reinit(soc)) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05301220 srng->base_vaddr_unaligned =
1221 qdf_mem_alloc_consistent(soc->osdev,
1222 soc->osdev->dev,
1223 srng->alloc_size,
1224 &srng->base_paddr_unaligned);
1225 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001226
1227 if (!srng->base_vaddr_unaligned) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301228 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1229 FL("alloc failed - ring_type: %d, ring_num %d"),
1230 ring_type, ring_num);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001231 return QDF_STATUS_E_NOMEM;
1232 }
1233
1234 ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
1235 ((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
1236 ring_params.ring_base_paddr = srng->base_paddr_unaligned +
1237 ((unsigned long)(ring_params.ring_base_vaddr) -
1238 (unsigned long)srng->base_vaddr_unaligned);
1239 ring_params.num_entries = num_entries;
1240
Krunal Sonic96a1162019-02-21 11:33:26 -08001241 dp_verbose_debug("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
1242 ring_type, ring_num,
1243 (void *)ring_params.ring_base_vaddr,
1244 (void *)ring_params.ring_base_paddr,
1245 ring_params.num_entries);
Mohit Khanna81179cb2018-08-16 20:50:43 -07001246
psimhac983d7e2017-07-26 15:20:07 -07001247 if (soc->intr_mode == DP_INTR_MSI) {
1248 dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
Krunal Sonic96a1162019-02-21 11:33:26 -08001249 dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
1250 ring_type, ring_num);
psimhac983d7e2017-07-26 15:20:07 -07001251
1252 } else {
1253 ring_params.msi_data = 0;
1254 ring_params.msi_addr = 0;
Krunal Sonic96a1162019-02-21 11:33:26 -08001255 dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
1256 ring_type, ring_num);
psimhac983d7e2017-07-26 15:20:07 -07001257 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001258
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +05301259 /*
1260 * Setup interrupt timer and batch counter thresholds for
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001261 * interrupt mitigation based on ring type
1262 */
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +05301263 if (ring_type == REO_DST) {
1264 ring_params.intr_timer_thres_us =
1265 wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1266 ring_params.intr_batch_cntr_thres_entries =
1267 wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1268 } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1269 ring_params.intr_timer_thres_us =
1270 wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1271 ring_params.intr_batch_cntr_thres_entries =
1272 wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1273 } else {
1274 ring_params.intr_timer_thres_us =
1275 wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1276 ring_params.intr_batch_cntr_thres_entries =
Karunakar Dasineni25f1b042018-02-15 23:26:17 -08001277 wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +05301278 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001279
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001280 /* Enable low threshold interrupts for rx buffer rings (regular and
1281 * monitor buffer rings.
1282 * TODO: See if this is required for any other ring
1283 */
Karunakar Dasineni37995ac2018-02-06 12:37:30 -08001284 if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1285 (ring_type == RXDMA_MONITOR_STATUS)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001286 /* TODO: Setting low threshold to 1/8th of ring size
1287 * see if this needs to be configurable
1288 */
1289 ring_params.low_threshold = num_entries >> 3;
1290 ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
Karunakar Dasinenibef3b1b2018-03-28 22:23:57 -07001291 ring_params.intr_timer_thres_us =
1292 wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1293 ring_params.intr_batch_cntr_thres_entries = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001294 }
1295
1296 srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
Dhanashri Atred4032ab2017-01-17 15:05:41 -08001297 mac_id, &ring_params);
Manoj Ekbote376116e2017-12-19 10:44:41 -08001298
1299 if (!srng->hal_srng) {
1300 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1301 srng->alloc_size,
1302 srng->base_vaddr_unaligned,
1303 srng->base_paddr_unaligned, 0);
1304 }
1305
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001306 return 0;
1307}
1308
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05301309/*
1310 * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
1311 * @soc: DP SOC handle
1312 * @srng: source ring structure
1313 * @ring_type: type of ring
1314 * @ring_num: ring number
1315 *
1316 * Return: None
1317 */
1318static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
1319 int ring_type, int ring_num)
1320{
Vinay Adellaa06e8c82018-12-14 20:31:49 +05301321 if (!srng->hal_srng) {
1322 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1323 FL("Ring type: %d, num:%d not setup"),
1324 ring_type, ring_num);
1325 return;
1326 }
1327
1328 hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1329 srng->hal_srng = NULL;
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05301330}
1331
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001332/**
1333 * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
1334 * Any buffers allocated and attached to ring entries are expected to be freed
1335 * before calling this function.
1336 */
static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
	int ring_type, int ring_num)
{
	/* On SoC re-init the HAL ring was already released by
	 * dp_srng_deinit(); only the non-reinit path cleans it up here.
	 */
	if (!dp_is_soc_reinit(soc)) {
		if (!srng->hal_srng && (srng->alloc_size == 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Ring type: %d, num:%d not setup"),
				  ring_type, ring_num);
			return;
		}

		if (srng->hal_srng) {
			hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
			srng->hal_srng = NULL;
		}
	}

	/* Free the consistent DMA allocation backing the ring, if any;
	 * zeroing alloc_size guards against a double free on re-entry.
	 */
	if (srng->alloc_size) {
		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
					srng->alloc_size,
					srng->base_vaddr_unaligned,
					srng->base_paddr_unaligned, 0);
		srng->alloc_size = 0;
	}
}
1362
1363/* TODO: Need this interface from HIF */
1364void *hif_get_hal_handle(void *hif_handle);
1365
1366/*
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301367 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1368 * @dp_ctx: DP SOC handle
1369 * @budget: Number of frames/descriptors that can be processed in one shot
1370 *
1371 * Return: remaining budget/quota for the soc device
1372 */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	uint32_t work_done = 0;
	/* Running budget: decremented by each handler's work_done; once it
	 * reaches zero the remaining ring types are skipped until the next
	 * invocation.
	 */
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;
	struct dp_pdev *pdev = NULL;
	int mac_id;

	/* Process Tx completion interrupts first to return back buffers */
	while (tx_mask) {
		if (tx_mask & 0x1) {
			work_done = dp_tx_comp_handler(soc,
					soc->tx_comp_ring[ring].hal_srng,
					remaining_quota);

			dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
					 tx_mask, ring, budget, work_done);

			budget -= work_done;
			if (budget <= 0)
				goto budget_done;

			remaining_quota = budget;
		}
		/* Shift toward the next ring bit in this context's mask */
		tx_mask = tx_mask >> 1;
		ring++;
	}


	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(soc,
				soc->reo_exception_ring.hal_srng,
				remaining_quota);

		dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
				 work_done, budget);

		budget -= work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(soc,
				soc->rx_rel_ring.hal_srng, remaining_quota);

		dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
				 work_done, budget);

		budget -= work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts: one REO destination ring per set mask bit */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (rx_mask & (1 << ring)) {
				work_done = dp_rx_process(int_ctx,
					soc->reo_dest_ring[ring].hal_srng,
					ring,
					remaining_quota);

				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);

				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	/* REO status processing is not budgeted */
	if (reo_status_mask)
		dp_reo_status_ring_handler(soc);

	/* Process LMAC interrupts */
	for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		if (!pdev)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);
			/* Monitor-mode rings for this mac */
			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
				work_done = dp_mon_process(soc, mac_for_pdev,
						remaining_quota);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			/* RXDMA error/destination ring for this mac */
			if (int_ctx->rxdma2host_ring_mask &
					(1 << mac_for_pdev)) {
				work_done = dp_rxdma_err_process(soc,
							mac_for_pdev,
							remaining_quota);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			/* Low-threshold refill interrupt: replenish the rx
			 * buffer ring (not budgeted)
			 */
			if (int_ctx->host2rxdma_ring_mask &
					(1 << mac_for_pdev)) {
				union dp_rx_desc_list_elem_t *desc_list = NULL;
				union dp_rx_desc_list_elem_t *tail = NULL;
				struct dp_srng *rx_refill_buf_ring =
					&pdev->rx_refill_buf_ring;

				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
					     1);
				dp_rx_buffers_replenish(soc, mac_for_pdev,
					rx_refill_buf_ring,
					&soc->rx_desc_buf[mac_for_pdev], 0,
					&desc_list, &tail);
			}
		}
	}

	/* Flush any LRO aggregates accumulated during rx processing */
	qdf_lro_flush(int_ctx->lro_ctx);

budget_done:
	/* Report how much of the caller's budget was consumed */
	return dp_budget - budget;
}
1515
1516/* dp_interrupt_timer()- timer poll for interrupts
1517 *
1518 * @arg: SoC Handle
1519 *
1520 * Return:
1521 *
1522 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001523static void dp_interrupt_timer(void *arg)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301524{
1525 struct dp_soc *soc = (struct dp_soc *) arg;
1526 int i;
1527
Ravi Joshi86e98262017-03-01 13:47:03 -08001528 if (qdf_atomic_read(&soc->cmn_init_done)) {
1529 for (i = 0;
1530 i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
1531 dp_service_srngs(&soc->intr_ctx[i], 0xffff);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301532
Ravi Joshi86e98262017-03-01 13:47:03 -08001533 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1534 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301535}
1536
1537/*
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07001538 * dp_soc_attach_poll() - Register handlers for DP interrupts
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301539 * @txrx_soc: DP SOC handle
1540 *
1541 * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1542 * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1543 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1544 *
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07001545 * Return: 0 for success, nonzero for failure.
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301546 */
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05301547static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301548{
1549 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1550 int i;
1551
psimhac983d7e2017-07-26 15:20:07 -07001552 soc->intr_mode = DP_INTR_POLL;
1553
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301554 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
Houston Hoffman648a9182017-05-21 23:27:50 -07001555 soc->intr_ctx[i].dp_intr_id = i;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07001556 soc->intr_ctx[i].tx_ring_mask =
1557 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1558 soc->intr_ctx[i].rx_ring_mask =
1559 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1560 soc->intr_ctx[i].rx_mon_ring_mask =
1561 wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1562 soc->intr_ctx[i].rx_err_ring_mask =
1563 wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1564 soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1565 wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1566 soc->intr_ctx[i].reo_status_ring_mask =
1567 wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1568 soc->intr_ctx[i].rxdma2host_ring_mask =
1569 wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301570 soc->intr_ctx[i].soc = soc;
Dhanashri Atre0da31222017-03-23 12:30:58 -07001571 soc->intr_ctx[i].lro_ctx = qdf_lro_init();
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301572 }
1573
1574 qdf_timer_init(soc->osdev, &soc->int_timer,
1575 dp_interrupt_timer, (void *)soc,
1576 QDF_TIMER_TYPE_WAKE_APPS);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301577
1578 return QDF_STATUS_SUCCESS;
1579}
D Harilakshmi5da9ee72017-10-04 16:14:12 +05301580
/* Forward declaration: real interrupt attach is defined further below */
static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);

#if defined(CONFIG_MCL)
/*
 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Call the appropriate attach function based on the mode of operation.
 * This is a WAR for enabling monitor mode.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
    struct dp_soc *soc = (struct dp_soc *)txrx_soc;

    /* Fall back to timer-driven polling when NAPI is disabled or the
     * driver is in global monitor mode. */
    if (!(soc->wlan_cfg_ctx->napi_enabled) ||
        con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s: Poll mode", __func__);
        return dp_soc_attach_poll(txrx_soc);
    } else {

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s: Interrupt mode", __func__);
        return dp_soc_interrupt_attach(txrx_soc);
    }
}
#else
#if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
/* Build-time forced poll mode: always use the timer-based attach */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
    return dp_soc_attach_poll(txrx_soc);
}
#else
/* Runtime choice: HIF decides whether polled mode is enabled */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
    struct dp_soc *soc = (struct dp_soc *)txrx_soc;

    if (hif_is_polled_mode_enabled(soc->hif_handle))
        return dp_soc_attach_poll(txrx_soc);
    else
        return dp_soc_interrupt_attach(txrx_soc);
}
#endif
#endif
Houston Hoffman648a9182017-05-21 23:27:50 -07001626
/*
 * dp_soc_interrupt_map_calculate_integrated() - Build the IRQ id map for one
 * interrupt context on integrated (non-MSI) targets.
 * @soc: DP SOC handle
 * @intr_ctx_num: interrupt context index the map is built for
 * @irq_id_map: output array of IRQ ids (caller-provided, HIF_MAX_GRP_IRQ)
 * @num_irq_r: output count of entries written into @irq_id_map
 *
 * For every ring bit set in the context's configured masks, one IRQ id is
 * appended to the map. The per-ring IRQ ids are derived by offsetting from
 * the first ring's id (e.g. wbm2host_tx_completions_ring1 - j); per-MAC
 * rings offset by the hw mac index instead of the raw bit position.
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
        int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
    int j;
    int num_irq = 0;

    /* Fetch every ring mask configured for this interrupt context */
    int tx_mask =
        wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
    int rx_mask =
        wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
    int rx_mon_mask =
        wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
    int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);
    int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);
    int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);
    int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);
    int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);
    int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);

    for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

        if (tx_mask & (1 << j)) {
            irq_id_map[num_irq++] =
                (wbm2host_tx_completions_ring1 - j);
        }

        if (rx_mask & (1 << j)) {
            irq_id_map[num_irq++] =
                (reo2host_destination_ring1 - j);
        }

        /* Per-MAC rings: translate the mask bit to a hw mac index */
        if (rxdma2host_ring_mask & (1 << j)) {
            irq_id_map[num_irq++] =
                rxdma2host_destination_ring_mac1 -
                wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
        }

        if (host2rxdma_ring_mask & (1 << j)) {
            irq_id_map[num_irq++] =
                host2rxdma_host_buf_ring_mac1 -
                wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
        }

        if (host2rxdma_mon_ring_mask & (1 << j)) {
            irq_id_map[num_irq++] =
                host2rxdma_monitor_ring1 -
                wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
        }

        /* Monitor mode contributes two IRQs per MAC: ppdu-end and
         * the monitor status ring. */
        if (rx_mon_mask & (1 << j)) {
            irq_id_map[num_irq++] =
                ppdu_end_interrupts_mac1 -
                wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
            irq_id_map[num_irq++] =
                rxdma2host_monitor_status_ring_mac1 -
                wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
        }

        if (rx_wbm_rel_ring_mask & (1 << j))
            irq_id_map[num_irq++] = wbm2host_rx_release;

        if (rx_err_ring_mask & (1 << j))
            irq_id_map[num_irq++] = reo2host_exception;

        if (reo_status_ring_mask & (1 << j))
            irq_id_map[num_irq++] = reo2host_status;

    }
    *num_irq_r = num_irq;
}
1703
/*
 * dp_soc_interrupt_map_calculate_msi() - Build the IRQ id map for one
 * interrupt context when MSI vectors are available.
 * @soc: DP SOC handle
 * @intr_ctx_num: interrupt context index
 * @irq_id_map: output array of IRQ ids
 * @num_irq_r: output count of entries written into @irq_id_map
 * @msi_vector_count: number of MSI vectors assigned to DP
 * @msi_vector_start: first MSI vector index assigned to DP
 *
 * All rings of a context share a single MSI vector, chosen by wrapping the
 * context number over the available vector range. Also switches the soc
 * into MSI interrupt mode.
 */
static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
        int intr_ctx_num, int *irq_id_map, int *num_irq_r,
        int msi_vector_count, int msi_vector_start)
{
    int tx_mask = wlan_cfg_get_tx_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);
    int rx_mask = wlan_cfg_get_rx_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);
    int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);
    int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);
    int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);
    int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);
    int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
                    soc->wlan_cfg_ctx, intr_ctx_num);

    /* Round-robin the context onto the assigned MSI vector range */
    unsigned int vector =
        (intr_ctx_num % msi_vector_count) + msi_vector_start;
    int num_irq = 0;

    soc->intr_mode = DP_INTR_MSI;

    /* Register one MSI irq for the context if any ring mask is set.
     * NOTE(review): host2rxdma_ring_mask / host2rxdma_mon_ring_mask are
     * not part of this condition, so a context carrying only those masks
     * would get no IRQ — confirm whether that is intentional. */
    if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
        rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
        irq_id_map[num_irq++] =
            pld_get_msi_irq(soc->osdev->dev, vector);

    *num_irq_r = num_irq;
}
1736
1737static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1738 int *irq_id_map, int *num_irq)
1739{
1740 int msi_vector_count, ret;
1741 uint32_t msi_base_data, msi_vector_start;
1742
1743 ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1744 &msi_vector_count,
1745 &msi_base_data,
1746 &msi_vector_start);
1747 if (ret)
1748 return dp_soc_interrupt_map_calculate_integrated(soc,
1749 intr_ctx_num, irq_id_map, num_irq);
1750
1751 else
1752 dp_soc_interrupt_map_calculate_msi(soc,
1753 intr_ctx_num, irq_id_map, num_irq,
1754 msi_vector_count, msi_vector_start);
1755}
1756
/*
 * dp_soc_interrupt_attach() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
{
    struct dp_soc *soc = (struct dp_soc *)txrx_soc;

    int i = 0;
    int num_irq = 0;

    for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
        int ret = 0;

        /* Map of IRQ ids registered with one interrupt context */
        int irq_id_map[HIF_MAX_GRP_IRQ];

        /* Pull every configured ring mask for this context; the
         * monitor mask comes from a mode-aware helper rather than
         * directly from cfg. */
        int tx_mask =
            wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
        int rx_mask =
            wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
        int rx_mon_mask =
            dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
        int rx_err_ring_mask =
            wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
        int rx_wbm_rel_ring_mask =
            wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
        int reo_status_ring_mask =
            wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
        int rxdma2host_ring_mask =
            wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
        int host2rxdma_ring_mask =
            wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
        int host2rxdma_mon_ring_mask =
            wlan_cfg_get_host2rxdma_mon_ring_mask(
                soc->wlan_cfg_ctx, i);

        /* Cache the masks on the context so dp_service_srngs() can
         * decide which rings to process for this group. */
        soc->intr_ctx[i].dp_intr_id = i;
        soc->intr_ctx[i].tx_ring_mask = tx_mask;
        soc->intr_ctx[i].rx_ring_mask = rx_mask;
        soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
        soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
        soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
        soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
        soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
        soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
        soc->intr_ctx[i].host2rxdma_mon_ring_mask =
            host2rxdma_mon_ring_mask;

        soc->intr_ctx[i].soc = soc;

        num_irq = 0;

        /* Translate the masks into concrete IRQ ids (MSI or legacy) */
        dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
                                       &num_irq);

        /* Register the group with HIF; dp_service_srngs is the NAPI
         * handler for all rings of this context. */
        ret = hif_register_ext_group(soc->hif_handle,
                num_irq, irq_id_map, dp_service_srngs,
                &soc->intr_ctx[i], "dp_intr",
                HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);

        if (ret) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
            FL("failed, ret = %d"), ret);

            /* NOTE(review): contexts registered before this failure
             * are not unwound here — presumably the caller invokes
             * dp_soc_interrupt_detach(); confirm. */
            return QDF_STATUS_E_FAILURE;
        }
        soc->intr_ctx[i].lro_ctx = qdf_lro_init();
    }

    hif_configure_ext_group_interrupts(soc->hif_handle);

    return QDF_STATUS_SUCCESS;
}
1837
1838/*
1839 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1840 * @txrx_soc: DP SOC handle
1841 *
1842 * Return: void
1843 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001844static void dp_soc_interrupt_detach(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301845{
1846 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
Leo Chang5ea93a42016-11-03 12:39:49 -07001847 int i;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301848
psimhac983d7e2017-07-26 15:20:07 -07001849 if (soc->intr_mode == DP_INTR_POLL) {
1850 qdf_timer_stop(&soc->int_timer);
1851 qdf_timer_free(&soc->int_timer);
psimhaa079b8c2017-08-02 17:27:14 -07001852 } else {
1853 hif_deregister_exec_group(soc->hif_handle, "dp_intr");
psimhac983d7e2017-07-26 15:20:07 -07001854 }
1855
Leo Chang5ea93a42016-11-03 12:39:49 -07001856 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1857 soc->intr_ctx[i].tx_ring_mask = 0;
1858 soc->intr_ctx[i].rx_ring_mask = 0;
1859 soc->intr_ctx[i].rx_mon_ring_mask = 0;
Houston Hoffman648a9182017-05-21 23:27:50 -07001860 soc->intr_ctx[i].rx_err_ring_mask = 0;
1861 soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1862 soc->intr_ctx[i].reo_status_ring_mask = 0;
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001863 soc->intr_ctx[i].rxdma2host_ring_mask = 0;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001864 soc->intr_ctx[i].host2rxdma_ring_mask = 0;
Keyur Parekh11865212018-10-12 18:03:12 -07001865 soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
Houston Hoffman648a9182017-05-21 23:27:50 -07001866
Dhanashri Atre0da31222017-03-23 12:30:58 -07001867 qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
Leo Chang5ea93a42016-11-03 12:39:49 -07001868 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301869}
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301870
/* Sizing heuristics used to estimate how many link descriptors the pool
 * needs for a fully-loaded system. */
#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4

/*
 * Allocate and setup link descriptor pool that will be used by HW for
 * various link and queue descriptors and managed by WBM
 *
 * The pool is carved into one or more DMA-coherent "banks" (each at most
 * max_alloc_size bytes). The descriptors are then advertised to HW through
 * the WBM idle-link mechanism: either directly via the wbm_idle_link SRNG
 * (when everything fits one allocation) or via a list of scatter buffers.
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE on allocation/setup failure
 * (partially allocated resources are freed under the fail label).
 */
static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
{
    int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
    int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
    uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
    uint32_t num_mpdus_per_link_desc =
        hal_num_mpdus_per_link_desc(soc->hal_soc);
    uint32_t num_msdus_per_link_desc =
        hal_num_msdus_per_link_desc(soc->hal_soc);
    uint32_t num_mpdu_links_per_queue_desc =
        hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
    uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
    uint32_t total_link_descs, total_mem_size;
    uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
    uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
    uint32_t num_link_desc_banks;
    uint32_t last_bank_size = 0;
    uint32_t entry_size, num_entries;
    int i;
    uint32_t desc_id = 0;   /* monotonically increasing cookie id */
    qdf_dma_addr_t *baseaddr = NULL;

    /* Only Tx queue descriptors are allocated from common link descriptor
     * pool Rx queue descriptors are not included in this because (REO queue
     * extension descriptors) they are expected to be allocated contiguously
     * with REO queue descriptors
     */
    num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
        AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;

    num_mpdu_queue_descs = num_mpdu_link_descs /
        num_mpdu_links_per_queue_desc;

    num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
        AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
        num_msdus_per_link_desc;

    num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
        AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;

    num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
        num_tx_msdu_link_descs + num_rx_msdu_link_descs;

    /* Round up to power of 2 */
    total_link_descs = 1;
    while (total_link_descs < num_entries)
        total_link_descs <<= 1;

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
        FL("total_link_descs: %u, link_desc_size: %d"),
        total_link_descs, link_desc_size);
    total_mem_size =  total_link_descs * link_desc_size;

    total_mem_size += link_desc_align;

    /* Split the requirement into full-size banks plus one trailing
     * (possibly smaller) bank. */
    if (total_mem_size <= max_alloc_size) {
        num_link_desc_banks = 0;
        last_bank_size = total_mem_size;
    } else {
        num_link_desc_banks = (total_mem_size) /
            (max_alloc_size - link_desc_align);
        last_bank_size = total_mem_size %
            (max_alloc_size - link_desc_align);
    }

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
        FL("total_mem_size: %d, num_link_desc_banks: %u"),
        total_mem_size, num_link_desc_banks);

    for (i = 0; i < num_link_desc_banks; i++) {
        /* On SoC re-init the DMA memory from the previous attach is
         * reused; only allocate on a fresh attach. */
        if (!dp_is_soc_reinit(soc)) {
            baseaddr = &soc->link_desc_banks[i].
                base_paddr_unaligned;
            soc->link_desc_banks[i].base_vaddr_unaligned =
                qdf_mem_alloc_consistent(soc->osdev,
                                         soc->osdev->dev,
                                         max_alloc_size,
                                         baseaddr);
        }
        soc->link_desc_banks[i].size = max_alloc_size;

        /* NOTE(review): "base + (base % align)" does not round the
         * address up to the alignment boundary in general; presumably
         * qdf_mem_alloc_consistent already returns suitably aligned
         * memory — confirm. */
        soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
            soc->link_desc_banks[i].base_vaddr_unaligned) +
            ((unsigned long)(
            soc->link_desc_banks[i].base_vaddr_unaligned) %
            link_desc_align));

        /* Apply the same virtual-address offset to the physical base */
        soc->link_desc_banks[i].base_paddr = (unsigned long)(
            soc->link_desc_banks[i].base_paddr_unaligned) +
            ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
            (unsigned long)(
            soc->link_desc_banks[i].base_vaddr_unaligned));

        if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                FL("Link descriptor memory alloc failed"));
            goto fail;
        }
    }

    if (last_bank_size) {
        /* Allocate last bank in case total memory required is not exact
         * multiple of max_alloc_size
         */
        if (!dp_is_soc_reinit(soc)) {
            baseaddr = &soc->link_desc_banks[i].
                base_paddr_unaligned;
            soc->link_desc_banks[i].base_vaddr_unaligned =
                qdf_mem_alloc_consistent(soc->osdev,
                                         soc->osdev->dev,
                                         last_bank_size,
                                         baseaddr);
        }
        soc->link_desc_banks[i].size = last_bank_size;

        soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
            (soc->link_desc_banks[i].base_vaddr_unaligned) +
            ((unsigned long)(
            soc->link_desc_banks[i].base_vaddr_unaligned) %
            link_desc_align));

        soc->link_desc_banks[i].base_paddr =
            (unsigned long)(
            soc->link_desc_banks[i].base_paddr_unaligned) +
            ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
            (unsigned long)(
            soc->link_desc_banks[i].base_vaddr_unaligned));
    }


    /* Allocate and setup link descriptor idle list for HW internal use */
    entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
    total_mem_size = entry_size * total_link_descs;

    if (total_mem_size <= max_alloc_size) {
        /* Single-ring case: publish every descriptor address directly
         * into the WBM idle-link SRNG. */
        void *desc;

        if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
            WBM_IDLE_LINK, 0, 0, total_link_descs)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                FL("Link desc idle ring setup failed"));
            goto fail;
        }

        hal_srng_access_start_unlocked(soc->hal_soc,
            soc->wbm_idle_link_ring.hal_srng);

        for (i = 0; i < MAX_LINK_DESC_BANKS &&
            soc->link_desc_banks[i].base_paddr; i++) {
            /* Usable descriptors in the bank after the alignment
             * offset is subtracted from its size. */
            uint32_t num_entries = (soc->link_desc_banks[i].size -
                ((unsigned long)(
                soc->link_desc_banks[i].base_vaddr) -
                (unsigned long)(
                soc->link_desc_banks[i].base_vaddr_unaligned)))
                / link_desc_size;
            unsigned long paddr = (unsigned long)(
                soc->link_desc_banks[i].base_paddr);

            while (num_entries && (desc = hal_srng_src_get_next(
                soc->hal_soc,
                soc->wbm_idle_link_ring.hal_srng))) {
                hal_set_link_desc_addr(desc,
                    LINK_DESC_COOKIE(desc_id, i), paddr);
                num_entries--;
                desc_id++;
                paddr += link_desc_size;
            }
        }
        hal_srng_access_end_unlocked(soc->hal_soc,
            soc->wbm_idle_link_ring.hal_srng);
    } else {
        /* Scatter-buffer case: the idle list is too large for one
         * allocation, so it is described to HW via a chain of
         * scatter buffers instead. */
        uint32_t num_scatter_bufs;
        uint32_t num_entries_per_buf;
        uint32_t rem_entries;
        uint8_t *scatter_buf_ptr;
        uint16_t scatter_buf_num;
        uint32_t buf_size = 0;

        soc->wbm_idle_scatter_buf_size =
            hal_idle_list_scatter_buf_size(soc->hal_soc);
        num_entries_per_buf = hal_idle_scatter_buf_num_entries(
            soc->hal_soc, soc->wbm_idle_scatter_buf_size);
        num_scatter_bufs = hal_idle_list_num_scatter_bufs(
            soc->hal_soc, total_mem_size,
            soc->wbm_idle_scatter_buf_size);

        if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                    FL("scatter bufs size out of bounds"));
            goto fail;
        }

        for (i = 0; i < num_scatter_bufs; i++) {
            baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
            /* Reuse existing DMA memory on SoC re-init */
            if (!dp_is_soc_reinit(soc)) {
                buf_size = soc->wbm_idle_scatter_buf_size;
                soc->wbm_idle_scatter_buf_base_vaddr[i] =
                    qdf_mem_alloc_consistent(soc->osdev,
                                             soc->osdev->
                                             dev,
                                             buf_size,
                                             baseaddr);
            }
            if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_ERROR,
                          FL("Scatter lst memory alloc fail"));
                goto fail;
            }
        }

        /* Populate idle list scatter buffers with link descriptor
         * pointers
         */
        scatter_buf_num = 0;
        scatter_buf_ptr = (uint8_t *)(
            soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
        rem_entries = num_entries_per_buf;

        for (i = 0; i < MAX_LINK_DESC_BANKS &&
            soc->link_desc_banks[i].base_paddr; i++) {
            uint32_t num_link_descs =
                (soc->link_desc_banks[i].size -
                ((unsigned long)(
                soc->link_desc_banks[i].base_vaddr) -
                (unsigned long)(
                soc->link_desc_banks[i].base_vaddr_unaligned)))
                / link_desc_size;
            unsigned long paddr = (unsigned long)(
                soc->link_desc_banks[i].base_paddr);

            while (num_link_descs) {
                hal_set_link_desc_addr((void *)scatter_buf_ptr,
                    LINK_DESC_COOKIE(desc_id, i), paddr);
                num_link_descs--;
                desc_id++;
                paddr += link_desc_size;
                rem_entries--;
                if (rem_entries) {
                    scatter_buf_ptr += entry_size;
                } else {
                    /* Current scatter buffer full: move to
                     * the next one. */
                    rem_entries = num_entries_per_buf;
                    scatter_buf_num++;

                    if (scatter_buf_num >= num_scatter_bufs)
                        break;

                    scatter_buf_ptr = (uint8_t *)(
                        soc->wbm_idle_scatter_buf_base_vaddr[
                        scatter_buf_num]);
                }
            }
        }
        /* Setup link descriptor idle list in HW */
        hal_setup_link_idle_list(soc->hal_soc,
            soc->wbm_idle_scatter_buf_base_paddr,
            soc->wbm_idle_scatter_buf_base_vaddr,
            num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
            (uint32_t)(scatter_buf_ptr -
            (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
            scatter_buf_num-1])), total_link_descs);
    }
    return 0;

fail:
    /* Unwind everything allocated so far: the idle-link ring, scatter
     * buffers, and link descriptor banks. */
    if (soc->wbm_idle_link_ring.hal_srng) {
        dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
                WBM_IDLE_LINK, 0);
    }

    for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
        if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
            qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
                soc->wbm_idle_scatter_buf_size,
                soc->wbm_idle_scatter_buf_base_vaddr[i],
                soc->wbm_idle_scatter_buf_base_paddr[i], 0);
            soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
        }
    }

    for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
        if (soc->link_desc_banks[i].base_vaddr_unaligned) {
            qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
                soc->link_desc_banks[i].size,
                soc->link_desc_banks[i].base_vaddr_unaligned,
                soc->link_desc_banks[i].base_paddr_unaligned,
                0);
            soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
        }
    }
    return QDF_STATUS_E_FAILURE;
}
2173
2174/*
2175 * Free link descriptor pool that was setup HW
2176 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08002177static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002178{
2179 int i;
2180
2181 if (soc->wbm_idle_link_ring.hal_srng) {
Manoj Ekbote525bcab2017-09-01 17:23:32 -07002182 dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002183 WBM_IDLE_LINK, 0);
2184 }
2185
2186 for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2187 if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07002188 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002189 soc->wbm_idle_scatter_buf_size,
2190 soc->wbm_idle_scatter_buf_base_vaddr[i],
2191 soc->wbm_idle_scatter_buf_base_paddr[i], 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08002192 soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002193 }
2194 }
2195
2196 for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2197 if (soc->link_desc_banks[i].base_vaddr_unaligned) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07002198 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002199 soc->link_desc_banks[i].size,
2200 soc->link_desc_banks[i].base_vaddr_unaligned,
2201 soc->link_desc_banks[i].base_paddr_unaligned,
2202 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08002203 soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002204 }
2205 }
2206}
2207
Mohit Khanna81179cb2018-08-16 20:50:43 -07002208#ifdef IPA_OFFLOAD
2209#define REO_DST_RING_SIZE_QCA6290 1023
2210#ifndef QCA_WIFI_QCA8074_VP
2211#define REO_DST_RING_SIZE_QCA8074 1023
2212#else
2213#define REO_DST_RING_SIZE_QCA8074 8
2214#endif /* QCA_WIFI_QCA8074_VP */
2215
2216#else
2217
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302218#define REO_DST_RING_SIZE_QCA6290 1024
Pratik Gandhi4cce3e02018-09-05 19:43:11 +05302219#ifndef QCA_WIFI_QCA8074_VP
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302220#define REO_DST_RING_SIZE_QCA8074 2048
Pratik Gandhi4cce3e02018-09-05 19:43:11 +05302221#else
2222#define REO_DST_RING_SIZE_QCA8074 8
Mohit Khanna81179cb2018-08-16 20:50:43 -07002223#endif /* QCA_WIFI_QCA8074_VP */
2224#endif /* IPA_OFFLOAD */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002225
2226/*
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05302227 * dp_ast_aging_timer_fn() - Timer callback function for WDS aging
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302228 * @soc: Datapath SOC handle
2229 *
Tallapragada Kalyan71c46b92018-03-01 13:17:10 +05302230 * This is a timer function used to age out stale AST nodes from
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302231 * AST table
2232 */
2233#ifdef FEATURE_WDS
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05302234static void dp_ast_aging_timer_fn(void *soc_hdl)
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302235{
2236 struct dp_soc *soc = (struct dp_soc *) soc_hdl;
2237 struct dp_pdev *pdev;
2238 struct dp_vdev *vdev;
2239 struct dp_peer *peer;
Tallapragada Kalyan1c14d5d2017-07-26 00:31:35 +05302240 struct dp_ast_entry *ase, *temp_ase;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302241 int i;
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05302242 bool check_wds_ase = false;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302243
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05302244 if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
2245 soc->wds_ast_aging_timer_cnt = 0;
2246 check_wds_ase = true;
2247 }
Pamidipati, Vijay8a4c03a2018-12-08 12:52:38 +05302248
2249 /* Peer list access lock */
2250 qdf_spin_lock_bh(&soc->peer_ref_mutex);
2251
2252 /* AST list access lock */
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302253 qdf_spin_lock_bh(&soc->ast_lock);
2254
2255 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
2256 pdev = soc->pdev_list[i];
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05302257 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302258 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2259 DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
Tallapragada Kalyan1c14d5d2017-07-26 00:31:35 +05302260 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302261 /*
2262 * Do not expire static ast entries
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05302263 * and HM WDS entries
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302264 */
Sathyanarayanan Esakkiappan4af55842018-10-23 12:58:07 +05302265 if (ase->type !=
2266 CDP_TXRX_AST_TYPE_WDS &&
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05302267 ase->type !=
2268 CDP_TXRX_AST_TYPE_MEC &&
2269 ase->type !=
2270 CDP_TXRX_AST_TYPE_DA)
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302271 continue;
2272
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05302273 /* Expire MEC entry every n sec.
2274 * This needs to be expired in
2275 * case if STA backbone is made as
2276 * AP backbone, In this case it needs
2277 * to be re-added as a WDS entry.
2278 */
2279 if (ase->is_active && ase->type ==
2280 CDP_TXRX_AST_TYPE_MEC) {
2281 ase->is_active = FALSE;
2282 continue;
2283 } else if (ase->is_active &&
2284 check_wds_ase) {
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302285 ase->is_active = FALSE;
2286 continue;
2287 }
2288
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05302289 if (ase->type ==
2290 CDP_TXRX_AST_TYPE_MEC) {
2291 DP_STATS_INC(soc,
2292 ast.aged_out, 1);
2293 dp_peer_del_ast(soc, ase);
2294 } else if (check_wds_ase) {
2295 DP_STATS_INC(soc,
2296 ast.aged_out, 1);
2297 dp_peer_del_ast(soc, ase);
2298 }
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302299 }
2300 }
2301 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05302302 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302303 }
2304
2305 qdf_spin_unlock_bh(&soc->ast_lock);
Pamidipati, Vijay8a4c03a2018-12-08 12:52:38 +05302306 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302307
2308 if (qdf_atomic_read(&soc->cmn_init_done))
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05302309 qdf_timer_mod(&soc->ast_aging_timer,
2310 DP_AST_AGING_TIMER_DEFAULT_MS);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302311}
2312
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05302313
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302314/*
2315 * dp_soc_wds_attach() - Setup WDS timer and AST table
2316 * @soc: Datapath SOC handle
2317 *
2318 * Return: None
2319 */
2320static void dp_soc_wds_attach(struct dp_soc *soc)
2321{
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05302322 soc->wds_ast_aging_timer_cnt = 0;
2323 qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
2324 dp_ast_aging_timer_fn, (void *)soc,
2325 QDF_TIMER_TYPE_WAKE_APPS);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302326
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05302327 qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302328}
2329
2330/*
2331 * dp_soc_wds_detach() - Detach WDS data structures and timers
2332 * @txrx_soc: DP SOC handle
2333 *
2334 * Return: None
2335 */
2336static void dp_soc_wds_detach(struct dp_soc *soc)
2337{
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05302338 qdf_timer_stop(&soc->ast_aging_timer);
2339 qdf_timer_free(&soc->ast_aging_timer);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302340}
2341#else
/* FEATURE_WDS disabled: WDS attach/detach are no-ops */
static void dp_soc_wds_attach(struct dp_soc *soc)
{
}

static void dp_soc_wds_detach(struct dp_soc *soc)
{
}
2349#endif
2350
2351/*
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302352 * dp_soc_reset_ring_map() - Reset cpu ring map
2353 * @soc: Datapath soc handler
2354 *
2355 * This api resets the default cpu ring map
2356 */
2357
2358static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2359{
2360 uint8_t i;
2361 int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2362
2363 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302364 switch (nss_config) {
2365 case dp_nss_cfg_first_radio:
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302366 /*
2367 * Setting Tx ring map for one nss offloaded radio
2368 */
2369 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302370 break;
2371
2372 case dp_nss_cfg_second_radio:
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302373 /*
2374 * Setting Tx ring for two nss offloaded radios
2375 */
2376 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302377 break;
2378
2379 case dp_nss_cfg_dbdc:
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302380 /*
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302381 * Setting Tx ring map for 2 nss offloaded radios
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302382 */
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302383 soc->tx_ring_map[i] =
2384 dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2385 break;
2386
2387 case dp_nss_cfg_dbtc:
2388 /*
2389 * Setting Tx ring map for 3 nss offloaded radios
2390 */
2391 soc->tx_ring_map[i] =
2392 dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2393 break;
2394
2395 default:
2396 dp_err("tx_ring_map failed due to invalid nss cfg");
2397 break;
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302398 }
2399 }
2400}
2401
Aniruddha Paule3a03342017-09-19 16:42:10 +05302402/*
2403 * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2404 * @dp_soc - DP soc handle
2405 * @ring_type - ring type
2406 * @ring_num - ring_num
2407 *
2408 * return 0 or 1
2409 */
2410static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2411{
2412 uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2413 uint8_t status = 0;
2414
2415 switch (ring_type) {
2416 case WBM2SW_RELEASE:
2417 case REO_DST:
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002418 case RXDMA_BUF:
Aniruddha Paule3a03342017-09-19 16:42:10 +05302419 status = ((nss_config) & (1 << ring_num));
2420 break;
2421 default:
2422 break;
2423 }
2424
2425 return status;
2426}
2427
2428/*
2429 * dp_soc_reset_intr_mask() - reset interrupt mask
2430 * @dp_soc - DP Soc handle
2431 *
2432 * Return: Return void
2433 */
2434static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2435{
2436 uint8_t j;
2437 int *grp_mask = NULL;
2438 int group_number, mask, num_ring;
2439
2440 /* number of tx ring */
2441 num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2442
2443 /*
2444 * group mask for tx completion ring.
2445 */
2446 grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2447
2448 /* loop and reset the mask for only offloaded ring */
2449 for (j = 0; j < num_ring; j++) {
2450 if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2451 continue;
2452 }
2453
2454 /*
2455 * Group number corresponding to tx offloaded ring.
2456 */
2457 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2458 if (group_number < 0) {
2459 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002460 FL("ring not part of any group; ring_type: %d,ring_num %d"),
Aniruddha Paule3a03342017-09-19 16:42:10 +05302461 WBM2SW_RELEASE, j);
2462 return;
2463 }
2464
2465 /* reset the tx mask for offloaded ring */
2466 mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2467 mask &= (~(1 << j));
2468
2469 /*
2470 * reset the interrupt mask for offloaded ring.
2471 */
2472 wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2473 }
2474
2475 /* number of rx rings */
2476 num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2477
2478 /*
2479 * group mask for reo destination ring.
2480 */
2481 grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2482
2483 /* loop and reset the mask for only offloaded ring */
2484 for (j = 0; j < num_ring; j++) {
2485 if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2486 continue;
2487 }
2488
2489 /*
2490 * Group number corresponding to rx offloaded ring.
2491 */
2492 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2493 if (group_number < 0) {
2494 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002495 FL("ring not part of any group; ring_type: %d,ring_num %d"),
Aniruddha Paule3a03342017-09-19 16:42:10 +05302496 REO_DST, j);
2497 return;
2498 }
2499
2500 /* set the interrupt mask for offloaded ring */
2501 mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2502 mask &= (~(1 << j));
2503
2504 /*
2505 * set the interrupt mask to zero for rx offloaded radio.
2506 */
2507 wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2508 }
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07002509
2510 /*
2511 * group mask for Rx buffer refill ring
2512 */
2513 grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2514
2515 /* loop and reset the mask for only offloaded ring */
2516 for (j = 0; j < MAX_PDEV_CNT; j++) {
2517 if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2518 continue;
2519 }
2520
2521 /*
2522 * Group number corresponding to rx offloaded ring.
2523 */
2524 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2525 if (group_number < 0) {
2526 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2527 FL("ring not part of any group; ring_type: %d,ring_num %d"),
2528 REO_DST, j);
2529 return;
2530 }
2531
2532 /* set the interrupt mask for offloaded ring */
2533 mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2534 group_number);
2535 mask &= (~(1 << j));
2536
2537 /*
2538 * set the interrupt mask to zero for rx offloaded radio.
2539 */
2540 wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2541 group_number, mask);
2542 }
Aniruddha Paule3a03342017-09-19 16:42:10 +05302543}
2544
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302545#ifdef IPA_OFFLOAD
2546/**
2547 * dp_reo_remap_config() - configure reo remap register value based
2548 * nss configuration.
2549 * based on offload_radio value below remap configuration
2550 * get applied.
2551 * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2552 * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2553 * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2554 * 3 - both Radios handled by NSS (remap not required)
2555 * 4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2556 *
2557 * @remap1: output parameter indicates reo remap 1 register value
2558 * @remap2: output parameter indicates reo remap 2 register value
2559 * Return: bool type, true if remap is configured else false.
2560 */
jiad09526ac2019-04-12 17:42:40 +08002561bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302562{
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302563 *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
2564 (0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
2565
2566 *remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
2567 (0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
2568
Mohit Khanna81179cb2018-08-16 20:50:43 -07002569 dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2570
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302571 return true;
2572}
2573#else
2574static bool dp_reo_remap_config(struct dp_soc *soc,
2575 uint32_t *remap1,
2576 uint32_t *remap2)
2577{
2578 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2579
2580 switch (offload_radio) {
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302581 case dp_nss_cfg_default:
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302582 *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2583 (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2584 (0x3 << 18) | (0x4 << 21)) << 8;
2585
2586 *remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
2587 (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
2588 (0x3 << 18) | (0x4 << 21)) << 8;
2589 break;
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302590 case dp_nss_cfg_first_radio:
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302591 *remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
2592 (0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
2593 (0x2 << 18) | (0x3 << 21)) << 8;
2594
2595 *remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
2596 (0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
2597 (0x4 << 18) | (0x2 << 21)) << 8;
2598 break;
2599
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302600 case dp_nss_cfg_second_radio:
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302601 *remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
2602 (0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
2603 (0x1 << 18) | (0x3 << 21)) << 8;
2604
2605 *remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
2606 (0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
2607 (0x4 << 18) | (0x1 << 21)) << 8;
2608 break;
2609
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302610 case dp_nss_cfg_dbdc:
2611 case dp_nss_cfg_dbtc:
2612 /* return false if both or all are offloaded to NSS */
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302613 return false;
2614 }
Mohit Khanna81179cb2018-08-16 20:50:43 -07002615
2616 dp_debug("remap1 %x remap2 %x offload_radio %u",
2617 *remap1, *remap2, offload_radio);
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302618 return true;
2619}
2620#endif
2621
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302622/*
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302623 * dp_reo_frag_dst_set() - configure reo register to set the
2624 * fragment destination ring
2625 * @soc : Datapath soc
2626 * @frag_dst_ring : output parameter to set fragment destination ring
2627 *
2628 * Based on offload_radio below fragment destination rings is selected
2629 * 0 - TCL
2630 * 1 - SW1
2631 * 2 - SW2
2632 * 3 - SW3
2633 * 4 - SW4
2634 * 5 - Release
2635 * 6 - FW
2636 * 7 - alternate select
2637 *
2638 * return: void
2639 */
2640static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2641{
2642 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2643
2644 switch (offload_radio) {
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302645 case dp_nss_cfg_default:
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302646 *frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2647 break;
Aniruddha Paulc34164e2018-09-14 14:25:30 +05302648 case dp_nss_cfg_dbdc:
2649 case dp_nss_cfg_dbtc:
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302650 *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2651 break;
2652 default:
2653 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2654 FL("dp_reo_frag_dst_set invalid offload radio config"));
2655 break;
2656 }
2657}
2658
Krunal Soni03ba0f52019-02-12 11:44:46 -08002659#ifdef ENABLE_VERBOSE_DEBUG
2660static void dp_enable_verbose_debug(struct dp_soc *soc)
2661{
2662 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2663
2664 soc_cfg_ctx = soc->wlan_cfg_ctx;
2665
2666 if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
2667 is_dp_verbose_debug_enabled = true;
Krunal Soni9911b442019-02-22 15:39:03 -08002668
2669 if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
2670 hal_set_verbose_debug(true);
2671 else
2672 hal_set_verbose_debug(false);
Krunal Soni03ba0f52019-02-12 11:44:46 -08002673}
2674#else
/* ENABLE_VERBOSE_DEBUG disabled: nothing to enable */
static void dp_enable_verbose_debug(struct dp_soc *soc)
{
}
2678#endif
2679
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302680/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002681 * dp_soc_cmn_setup() - Common SoC level initializion
2682 * @soc: Datapath SOC handle
2683 *
2684 * This is an internal function used to setup common SOC data structures,
2685 * to be called from PDEV attach after receiving HW mode capabilities from FW
2686 */
2687static int dp_soc_cmn_setup(struct dp_soc *soc)
2688{
2689 int i;
Dhanashri Atre14049172016-11-11 18:32:36 -08002690 struct hal_reo_params reo_params;
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302691 int tx_ring_size;
2692 int tx_comp_ring_size;
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302693 int reo_dst_ring_size;
Vivek126db5d2018-07-25 22:05:04 +05302694 uint32_t entries;
2695 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002696
Ravi Joshi86e98262017-03-01 13:47:03 -08002697 if (qdf_atomic_read(&soc->cmn_init_done))
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002698 return 0;
2699
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002700 if (dp_hw_link_desc_pool_setup(soc))
2701 goto fail1;
2702
Vivek126db5d2018-07-25 22:05:04 +05302703 soc_cfg_ctx = soc->wlan_cfg_ctx;
Krunal Soni03ba0f52019-02-12 11:44:46 -08002704
2705 dp_enable_verbose_debug(soc);
2706
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002707 /* Setup SRNG rings */
2708 /* Common rings */
2709 if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302710 wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302711 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2712 FL("dp_srng_setup failed for wbm_desc_rel_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002713 goto fail1;
2714 }
2715
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302716 soc->num_tcl_data_rings = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002717 /* Tx data rings */
Vivek126db5d2018-07-25 22:05:04 +05302718 if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002719 soc->num_tcl_data_rings =
Vivek126db5d2018-07-25 22:05:04 +05302720 wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302721 tx_comp_ring_size =
Vivek126db5d2018-07-25 22:05:04 +05302722 wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302723 tx_ring_size =
Vivek126db5d2018-07-25 22:05:04 +05302724 wlan_cfg_tx_ring_size(soc_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002725 for (i = 0; i < soc->num_tcl_data_rings; i++) {
2726 if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302727 TCL_DATA, i, 0, tx_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302728 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002729 QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302730 FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002731 goto fail1;
2732 }
Yun Parkfde6b9e2017-06-26 17:13:11 -07002733 /*
2734 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2735 * count
2736 */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002737 if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302738 WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302739 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002740 QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302741 FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002742 goto fail1;
2743 }
2744 }
2745 } else {
2746 /* This will be incremented during per pdev ring setup */
2747 soc->num_tcl_data_rings = 0;
2748 }
2749
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302750 if (dp_tx_soc_attach(soc)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302751 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2752 FL("dp_tx_soc_attach failed"));
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302753 goto fail1;
2754 }
2755
Vivek126db5d2018-07-25 22:05:04 +05302756 entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002757 /* TCL command and status rings */
2758 if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302759 entries)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302760 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2761 FL("dp_srng_setup failed for tcl_cmd_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002762 goto fail1;
2763 }
2764
Vivek126db5d2018-07-25 22:05:04 +05302765 entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002766 if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302767 entries)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302768 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2769 FL("dp_srng_setup failed for tcl_status_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002770 goto fail1;
2771 }
2772
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302773 reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002774
2775 /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2776 * descriptors
2777 */
2778
2779 /* Rx data rings */
Vivek126db5d2018-07-25 22:05:04 +05302780 if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002781 soc->num_reo_dest_rings =
Vivek126db5d2018-07-25 22:05:04 +05302782 wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
Dhanashri Atre14049172016-11-11 18:32:36 -08002783 QDF_TRACE(QDF_MODULE_ID_DP,
Aditya Sathishded018e2018-07-02 16:25:21 +05302784 QDF_TRACE_LEVEL_INFO,
2785 FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002786 for (i = 0; i < soc->num_reo_dest_rings; i++) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002787 if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
Balamurugan Mahalingam54d16a92018-06-25 17:08:08 +05302788 i, 0, reo_dst_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302789 QDF_TRACE(QDF_MODULE_ID_DP,
Vivek126db5d2018-07-25 22:05:04 +05302790 QDF_TRACE_LEVEL_ERROR,
2791 FL(RNG_ERR "reo_dest_ring [%d]"), i);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002792 goto fail1;
2793 }
2794 }
2795 } else {
2796 /* This will be incremented during per pdev ring setup */
2797 soc->num_reo_dest_rings = 0;
2798 }
2799
Vivek126db5d2018-07-25 22:05:04 +05302800 entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002801 /* LMAC RxDMA to SW Rings configuration */
Vivek126db5d2018-07-25 22:05:04 +05302802 if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002803 /* Only valid for MCL */
2804 struct dp_pdev *pdev = soc->pdev_list[0];
2805
2806 for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2807 if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
Vivek126db5d2018-07-25 22:05:04 +05302808 RXDMA_DST, 0, i,
2809 entries)) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002810 QDF_TRACE(QDF_MODULE_ID_DP,
Vivek126db5d2018-07-25 22:05:04 +05302811 QDF_TRACE_LEVEL_ERROR,
2812 FL(RNG_ERR "rxdma_err_dst_ring"));
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002813 goto fail1;
2814 }
2815 }
2816 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002817 /* TBD: call dp_rx_init to setup Rx SW descriptors */
2818
2819 /* REO reinjection ring */
Vivek126db5d2018-07-25 22:05:04 +05302820 entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002821 if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302822 entries)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302823 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302824 FL("dp_srng_setup failed for reo_reinject_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002825 goto fail1;
2826 }
2827
2828
2829 /* Rx release ring */
2830 if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
Vivek126db5d2018-07-25 22:05:04 +05302831 wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302832 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302833 FL("dp_srng_setup failed for rx_rel_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002834 goto fail1;
2835 }
2836
2837
2838 /* Rx exception ring */
Vivek126db5d2018-07-25 22:05:04 +05302839 entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2840 if (dp_srng_setup(soc, &soc->reo_exception_ring,
2841 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302842 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Vivek126db5d2018-07-25 22:05:04 +05302843 FL("dp_srng_setup failed for reo_exception_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002844 goto fail1;
2845 }
2846
2847
2848 /* REO command and status rings */
2849 if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302850 wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302851 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2852 FL("dp_srng_setup failed for reo_cmd_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002853 goto fail1;
2854 }
2855
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07002856 hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2857 TAILQ_INIT(&soc->rx.reo_cmd_list);
2858 qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2859
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002860 if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
Vivek126db5d2018-07-25 22:05:04 +05302861 wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx))) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302862 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2863 FL("dp_srng_setup failed for reo_status_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002864 goto fail1;
2865 }
2866
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302867
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302868 /* Reset the cpu ring map if radio is NSS offloaded */
Vivek126db5d2018-07-25 22:05:04 +05302869 if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302870 dp_soc_reset_cpu_ring_map(soc);
Aniruddha Paule3a03342017-09-19 16:42:10 +05302871 dp_soc_reset_intr_mask(soc);
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302872 }
2873
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002874 /* Setup HW REO */
Dhanashri Atre14049172016-11-11 18:32:36 -08002875 qdf_mem_zero(&reo_params, sizeof(reo_params));
2876
Vivek126db5d2018-07-25 22:05:04 +05302877 if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
Dhanashri Atre14049172016-11-11 18:32:36 -08002878
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302879 /*
2880 * Reo ring remap is not required if both radios
2881 * are offloaded to NSS
2882 */
2883 if (!dp_reo_remap_config(soc,
2884 &reo_params.remap1,
2885 &reo_params.remap2))
2886 goto out;
2887
2888 reo_params.rx_hash_enabled = true;
2889 }
2890
psimhafc2f91b2018-01-10 15:30:03 -08002891 /* setup the global rx defrag waitlist */
2892 TAILQ_INIT(&soc->rx.defrag.waitlist);
2893 soc->rx.defrag.timeout_ms =
Vivek126db5d2018-07-25 22:05:04 +05302894 wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
Karunakar Dasinenif8ec0cb2019-01-29 13:07:05 -08002895 soc->rx.defrag.next_flush_ms = 0;
psimhafc2f91b2018-01-10 15:30:03 -08002896 soc->rx.flags.defrag_timeout_check =
Vivek126db5d2018-07-25 22:05:04 +05302897 wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
Lin Baif1c577e2018-05-22 20:45:42 +08002898 qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
psimhafc2f91b2018-01-10 15:30:03 -08002899
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302900out:
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302901 /*
2902 * set the fragment destination ring
2903 */
2904 dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2905
Dhanashri Atre14049172016-11-11 18:32:36 -08002906 hal_reo_setup(soc->hal_soc, &reo_params);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002907
Ravi Joshi86e98262017-03-01 13:47:03 -08002908 qdf_atomic_set(&soc->cmn_init_done, 1);
Pamidipati, Vijayb113bbc2019-01-22 22:06:36 +05302909 dp_soc_wds_attach(soc);
2910
Om Prakash Tripathi12126822017-08-03 10:21:24 +05302911 qdf_nbuf_queue_init(&soc->htt_stats.msg);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002912 return 0;
2913fail1:
2914 /*
2915 * Cleanup will be done as part of soc_detach, which will
2916 * be called on pdev attach failure
2917 */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002918 return QDF_STATUS_E_FAILURE;
2919}
2920
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002921static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002922
Mohit Khanna16816ae2018-10-30 14:12:03 -07002923static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
Dhanashri Atre14049172016-11-11 18:32:36 -08002924{
2925 struct cdp_lro_hash_config lro_hash;
Mohit Khanna16816ae2018-10-30 14:12:03 -07002926 QDF_STATUS status;
Dhanashri Atre14049172016-11-11 18:32:36 -08002927
2928 if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
Mohit Khanna16816ae2018-10-30 14:12:03 -07002929 !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
2930 !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2931 dp_err("LRO, GRO and RX hash disabled");
2932 return QDF_STATUS_E_FAILURE;
Dhanashri Atre14049172016-11-11 18:32:36 -08002933 }
2934
2935 qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2936
Mohit Khanna16816ae2018-10-30 14:12:03 -07002937 if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
2938 wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
Dhanashri Atre14049172016-11-11 18:32:36 -08002939 lro_hash.lro_enable = 1;
2940 lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2941 lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
Houston Hoffman41b912c2017-08-30 14:27:51 -07002942 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2943 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
Dhanashri Atre14049172016-11-11 18:32:36 -08002944 }
2945
Houston Hoffman41b912c2017-08-30 14:27:51 -07002946 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
Dhanashri Atre14049172016-11-11 18:32:36 -08002947 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2948 LRO_IPV4_SEED_ARR_SZ));
Dhanashri Atre14049172016-11-11 18:32:36 -08002949 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2950 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2951 LRO_IPV6_SEED_ARR_SZ));
2952
Dhanashri Atre14049172016-11-11 18:32:36 -08002953 qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2954
Mohit Khanna16816ae2018-10-30 14:12:03 -07002955 if (!soc->cdp_soc.ol_ops->lro_hash_config) {
2956 QDF_BUG(0);
2957 dp_err("lro_hash_config not configured");
2958 return QDF_STATUS_E_FAILURE;
2959 }
2960
2961 status = soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev,
2962 &lro_hash);
2963 if (!QDF_IS_STATUS_SUCCESS(status)) {
2964 dp_err("failed to send lro_hash_config to FW %u", status);
2965 return status;
2966 }
2967
2968 dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
2969 lro_hash.lro_enable, lro_hash.tcp_flag,
2970 lro_hash.tcp_flag_mask);
2971
2972 dp_info("toeplitz_hash_ipv4:");
2973 qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2974 (void *)lro_hash.toeplitz_hash_ipv4,
2975 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2976 LRO_IPV4_SEED_ARR_SZ));
2977
2978 dp_info("toeplitz_hash_ipv6:");
2979 qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2980 (void *)lro_hash.toeplitz_hash_ipv6,
2981 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2982 LRO_IPV6_SEED_ARR_SZ));
2983
2984 return status;
Dhanashri Atre14049172016-11-11 18:32:36 -08002985}
2986
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002987/*
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002988* dp_rxdma_ring_setup() - configure the RX DMA rings
2989* @soc: data path SoC handle
2990* @pdev: Physical device handle
2991*
2992* Return: 0 - success, > 0 - failure
2993*/
2994#ifdef QCA_HOST2FW_RXBUF_RING
2995static int dp_rxdma_ring_setup(struct dp_soc *soc,
2996 struct dp_pdev *pdev)
2997{
Vivek126db5d2018-07-25 22:05:04 +05302998 struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2999 int max_mac_rings;
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003000 int i;
3001
Vivek126db5d2018-07-25 22:05:04 +05303002 pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3003 max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
3004
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003005 for (i = 0; i < max_mac_rings; i++) {
Krunal Sonic96a1162019-02-21 11:33:26 -08003006 dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003007 if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
Vivek126db5d2018-07-25 22:05:04 +05303008 RXDMA_BUF, 1, i,
3009 wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx))) {
Dhanashri Atred4032ab2017-01-17 15:05:41 -08003010 QDF_TRACE(QDF_MODULE_ID_DP,
3011 QDF_TRACE_LEVEL_ERROR,
3012 FL("failed rx mac ring setup"));
3013 return QDF_STATUS_E_FAILURE;
3014 }
3015 }
3016 return QDF_STATUS_SUCCESS;
3017}
3018#else
3019static int dp_rxdma_ring_setup(struct dp_soc *soc,
3020 struct dp_pdev *pdev)
3021{
3022 return QDF_STATUS_SUCCESS;
3023}
3024#endif
Ishank Jain949674c2017-02-27 17:09:29 +05303025
3026/**
3027 * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
3028 * @pdev - DP_PDEV handle
3029 *
3030 * Return: void
3031 */
3032static inline void
3033dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3034{
3035 uint8_t map_id;
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05303036 struct dp_soc *soc = pdev->soc;
3037
3038 if (!soc)
3039 return;
3040
Ishank Jain949674c2017-02-27 17:09:29 +05303041 for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05303042 qdf_mem_copy(pdev->dscp_tid_map[map_id],
3043 default_dscp_tid_map,
3044 sizeof(default_dscp_tid_map));
Ishank Jain949674c2017-02-27 17:09:29 +05303045 }
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05303046
3047 for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3048 hal_tx_set_dscp_tid_map(soc->hal_soc,
3049 default_dscp_tid_map,
3050 map_id);
Ishank Jain949674c2017-02-27 17:09:29 +05303051 }
3052}
3053
Debasis Dasc39a68d2019-01-28 17:02:06 +05303054/**
3055 * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
3056 * @pdev - DP_PDEV handle
3057 *
3058 * Return: void
3059 */
3060static inline void
3061dp_pcp_tid_map_setup(struct dp_pdev *pdev)
3062{
3063 struct dp_soc *soc = pdev->soc;
3064
3065 if (!soc)
3066 return;
3067
3068 qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
3069 sizeof(default_pcp_tid_map));
3070 hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
3071}
3072
Yun Park47e6af82018-01-17 12:15:01 -08003073#ifdef IPA_OFFLOAD
3074/**
3075 * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
3076 * @soc: data path instance
3077 * @pdev: core txrx pdev context
3078 *
3079 * Return: QDF_STATUS_SUCCESS: success
3080 * QDF_STATUS_E_RESOURCES: Error return
3081 */
3082static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3083 struct dp_pdev *pdev)
3084{
Vivek126db5d2018-07-25 22:05:04 +05303085 struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3086 int entries;
3087
3088 soc_cfg_ctx = soc->wlan_cfg_ctx;
3089 entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
3090
Yun Park47e6af82018-01-17 12:15:01 -08003091 /* Setup second Rx refill buffer ring */
3092 if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3093 IPA_RX_REFILL_BUF_RING_IDX,
Vivek126db5d2018-07-25 22:05:04 +05303094 pdev->pdev_id,
3095 entries)) {
Yun Park47e6af82018-01-17 12:15:01 -08003096 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3097 FL("dp_srng_setup failed second rx refill ring"));
3098 return QDF_STATUS_E_FAILURE;
3099 }
3100 return QDF_STATUS_SUCCESS;
3101}
3102
3103/**
3104 * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
3105 * @soc: data path instance
3106 * @pdev: core txrx pdev context
3107 *
3108 * Return: void
3109 */
3110static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3111 struct dp_pdev *pdev)
3112{
3113 dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3114 IPA_RX_REFILL_BUF_RING_IDX);
3115}
3116
3117#else
Yun Park47e6af82018-01-17 12:15:01 -08003118static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3119 struct dp_pdev *pdev)
3120{
3121 return QDF_STATUS_SUCCESS;
3122}
3123
3124static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3125 struct dp_pdev *pdev)
3126{
3127}
Venkata Sharath Chandra Manchala30e442b2018-06-26 12:29:24 -07003128#endif
Yun Park47e6af82018-01-17 12:15:01 -08003129
#if !defined(DISABLE_MON_CONFIG)
/**
 * dp_mon_rings_setup() - Initialize Monitor rings based on target
 * @soc: soc handle
 * @pdev: physical device handle
 *
 * For each RXDMA ring of the pdev: when the target has a dedicated
 * monitor RXDMA engine (rxdma1_enable), set up the full set of monitor
 * rings (buf, dst, status, desc); otherwise only the status ring is
 * needed. Ring sizes come from the per-pdev wlan_cfg context.
 *
 * Return: nonzero on failure and zero on success
 */
static
QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int mac_id = 0;
	int pdev_id = pdev->pdev_id;
	int entries;
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		/* map the pdev-relative mac index to the global mac id */
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		if (soc->wlan_cfg_ctx->rxdma1_enable) {
			/* Monitor buffer ring (host-filled rx buffers) */
			entries =
			   wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
			if (dp_srng_setup(soc,
					  &pdev->rxdma_mon_buf_ring[mac_id],
					  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
					  entries)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_buf_ring "));
				return QDF_STATUS_E_NOMEM;
			}

			/* Monitor destination ring */
			entries =
			   wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
			if (dp_srng_setup(soc,
					  &pdev->rxdma_mon_dst_ring[mac_id],
					  RXDMA_MONITOR_DST, 0, mac_for_pdev,
					  entries)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_dst_ring"));
				return QDF_STATUS_E_NOMEM;
			}

			/* Monitor status ring (PPDU status TLVs) */
			entries =
			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
			if (dp_srng_setup(soc,
					  &pdev->rxdma_mon_status_ring[mac_id],
					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
					  entries)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_status_ring"));
				return QDF_STATUS_E_NOMEM;
			}

			/* Monitor descriptor ring (link descriptors) */
			entries =
			   wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
			if (dp_srng_setup(soc,
					  &pdev->rxdma_mon_desc_ring[mac_id],
					  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
					  entries)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_desc_ring"));
				return QDF_STATUS_E_NOMEM;
			}
		} else {
			/*
			 * No dedicated monitor RXDMA: only the status ring
			 * is required on this target.
			 */
			entries =
			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
			if (dp_srng_setup(soc,
					  &pdev->rxdma_mon_status_ring[mac_id],
					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
					  entries)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL(RNG_ERR "rxdma_mon_status_ring"));
				return QDF_STATUS_E_NOMEM;
			}
		}
	}

	return QDF_STATUS_SUCCESS;
}
#else
/* Monitor mode compiled out: nothing to set up */
static
QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif
3223
/*dp_iterate_update_peer_list - update peer stats on cal client timer
 * @pdev_hdl: pdev handle
 */
#ifdef ATH_SUPPORT_EXT_STAT
void dp_iterate_update_peer_list(void *pdev_hdl)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
	struct dp_soc *soc = pdev->soc;
	struct dp_vdev *vdev_it = NULL;
	struct dp_peer *peer_it = NULL;

	/*
	 * Lock order: soc peer_ref_mutex first, then the pdev vdev list
	 * lock; released in the reverse order below.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->vdev_list_lock);

	/* Walk every peer of every vdev under this pdev */
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev_it) {
		DP_VDEV_ITERATE_PEER_LIST(vdev_it, peer_it) {
			dp_cal_client_update_peer_stats(&peer_it->stats);
		}
	}

	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}
#else
void dp_iterate_update_peer_list(void *pdev_hdl)
{
}
#endif
/*
* dp_pdev_attach_wifi3() - attach txrx pdev
* @txrx_soc: Datapath SOC handle
* @ctrl_pdev: Opaque PDEV object
* @htc_handle: HTC handle for host-target interface
* @qdf_osdev: QDF OS device
* @pdev_id: PDEV ID
*
* Allocates (or, on SoC re-init, reuses) the dp_pdev, attaches its cfg
* context, sets up all per-pdev rings (TCL/WBM, REO, RXDMA, monitor,
* IPA), and initializes tx/rx/monitor/stats state. On any failure after
* dp_soc_cmn_setup, cleanup is delegated to dp_pdev_detach.
*
* Return: DP PDEV handle on success, NULL on failure
*/
static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
{
	int tx_ring_size;
	int tx_comp_ring_size;
	int reo_dst_ring_size;
	int entries;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int nss_cfg;
	void *sojourn_buf;

	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct dp_pdev *pdev = NULL;

	/* On SoC re-init the pdev object survives and is reused */
	if (dp_is_soc_reinit(soc))
		pdev = soc->pdev_list[pdev_id];
	else
		pdev = qdf_mem_malloc(sizeof(*pdev));

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("DP PDEV memory allocation failed"));
		goto fail0;
	}

	/*
	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution, i.e. in the absence of any vdev.
	 */
	pdev->pdev_deinit = 0;
	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));

	if (!pdev->invalid_peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid peer memory allocation failed"));
		qdf_mem_free(pdev);
		goto fail0;
	}

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);

	if (!pdev->wlan_cfg_ctx) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("pdev cfg_attach failed"));

		/* before dp_soc_cmn_setup: free directly, not via detach */
		qdf_mem_free(pdev->invalid_peer);
		qdf_mem_free(pdev);
		goto fail0;
	}

	/*
	 * set nss pdev config based on soc config
	 */
	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
					 (nss_cfg & (1 << pdev_id)));

	pdev->soc = soc;
	pdev->ctrl_pdev = ctrl_pdev;
	pdev->pdev_id = pdev_id;
	soc->pdev_list[pdev_id] = pdev;

	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
	soc->pdev_count++;

	TAILQ_INIT(&pdev->vdev_list);
	qdf_spinlock_create(&pdev->vdev_list_lock);
	pdev->vdev_count = 0;

	qdf_spinlock_create(&pdev->tx_mutex);
	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
	TAILQ_INIT(&pdev->neighbour_peers_list);
	pdev->neighbour_peers_added = false;
	pdev->monitor_configured = false;

	/* common (first-pdev) SoC setup; from here failures go to fail1 */
	if (dp_soc_cmn_setup(soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_soc_cmn_setup failed"));
		goto fail1;
	}

	/* Setup per PDEV TCL rings if configured */
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		tx_ring_size =
			wlan_cfg_tx_ring_size(soc_cfg_ctx);
		tx_comp_ring_size =
			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);

		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
			pdev_id, pdev_id, tx_ring_size)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("dp_srng_setup failed for tcl_data_ring"));
			goto fail1;
		}
		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
			WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("dp_srng_setup failed for tx_comp_ring"));
			goto fail1;
		}
		soc->num_tcl_data_rings++;
	}

	/* Tx specific init */
	if (dp_tx_pdev_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_tx_pdev_attach failed"));
		goto fail1;
	}

	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
	/* Setup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
			pdev_id, pdev_id, reo_dst_ring_size)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("dp_srng_setup failed for reo_dest_ringn"));
			goto fail1;
		}
		soc->num_reo_dest_rings++;

	}
	/* RXDMA refill (host -> target buffer replenish) ring */
	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			 FL("dp_srng_setup failed rx refill ring"));
		goto fail1;
	}

	if (dp_rxdma_ring_setup(soc, pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			 FL("RXDMA ring config failed"));
		goto fail1;
	}

	if (dp_mon_rings_setup(soc, pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MONITOR rings setup failed"));
		goto fail1;
	}

	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
				  0, pdev_id,
				  entries)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_err_dst_ring"));
			goto fail1;
		}
	}

	/* IPA offload resources (no-ops when IPA_OFFLOAD is disabled) */
	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
		goto fail1;

	if (dp_ipa_ring_resource_setup(soc, pdev))
		goto fail1;

	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_ipa_uc_attach failed"));
		goto fail1;
	}

	/* Rx specific init */
	if (dp_rx_pdev_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_rx_pdev_attach failed"));
		goto fail1;
	}

	DP_STATS_INIT(pdev);

	/* Monitor filter init: pass everything by default */
	pdev->mon_filter_mode = MON_FILTER_ALL;
	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
	pdev->fp_data_filter = FILTER_DATA_ALL;
	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
	pdev->mo_data_filter = FILTER_DATA_ALL;

	dp_local_peer_id_pool_init(pdev);

	dp_dscp_tid_map_setup(pdev);
	dp_pcp_tid_map_setup(pdev);

	/* Rx monitor mode specific init */
	if (dp_rx_pdev_mon_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_rx_pdev_mon_attach failed");
		goto fail1;
	}

	if (dp_wdi_event_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_wdi_evet_attach failed");
		goto fail1;
	}

	/* set the reo destination during initialization */
	pdev->reo_dest = pdev->pdev_id + 1;

	/*
	 * initialize ppdu tlv list
	 */
	TAILQ_INIT(&pdev->ppdu_info_list);
	pdev->tlv_count = 0;
	pdev->list_depth = 0;

	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));

	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
			      TRUE);

	/* sojourn_buf is optional; attach proceeds even if alloc failed */
	if (pdev->sojourn_buf) {
		sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
		qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
	}
	/* initialize cal client timer */
	dp_cal_client_attach(&pdev->cal_client_ctx, pdev, pdev->soc->osdev,
			     &dp_iterate_update_peer_list);
	qdf_event_create(&pdev->fw_peer_stats_event);

	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	return (struct cdp_pdev *)pdev;

fail1:
	/* full detach unwinds everything initialized so far */
	dp_pdev_detach((struct cdp_pdev *)pdev, 0);

fail0:
	return NULL;
}
3497
/*
* dp_rxdma_ring_cleanup() - cleanup the RX DMA rings
* @soc: data path SoC handle
* @pdev: Physical device handle
*
* Releases the per-MAC rx_mac_buf rings set up by dp_rxdma_ring_setup()
* and frees the monitor reap timer.
*
* Return: void
*/
#ifdef QCA_HOST2FW_RXBUF_RING
static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
	int max_mac_rings =
		wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	int i;

	/* clamp to the array bound */
	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
		 max_mac_rings : MAX_RX_MAC_RINGS;
	/*
	 * Iterate only over the configured (clamped) ring count, matching
	 * the loop in dp_rxdma_ring_setup(); the previous code computed
	 * the clamp but then looped over MAX_RX_MAC_RINGS, touching rings
	 * that were never set up.
	 */
	for (i = 0; i < max_mac_rings; i++)
		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
			 RXDMA_BUF, 1);

	qdf_timer_free(&soc->mon_reap_timer);
}
#else
static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
	 struct dp_pdev *pdev)
{
}
#endif
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05303527
3528/*
3529 * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3530 * @pdev: device object
3531 *
3532 * Return: void
3533 */
3534static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3535{
3536 struct dp_neighbour_peer *peer = NULL;
3537 struct dp_neighbour_peer *temp_peer = NULL;
3538
3539 TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3540 neighbour_peer_list_elem, temp_peer) {
3541 /* delete this peer from the list */
3542 TAILQ_REMOVE(&pdev->neighbour_peers_list,
3543 peer, neighbour_peer_list_elem);
3544 qdf_mem_free(peer);
3545 }
3546
3547 qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3548}
3549
Anish Natarajcf526b72018-03-26 15:55:30 +05303550/**
3551* dp_htt_ppdu_stats_detach() - detach stats resources
3552* @pdev: Datapath PDEV handle
3553*
3554* Return: void
3555*/
3556static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3557{
3558 struct ppdu_info *ppdu_info, *ppdu_info_next;
3559
3560 TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3561 ppdu_info_list_elem, ppdu_info_next) {
3562 if (!ppdu_info)
3563 break;
3564 qdf_assert_always(ppdu_info->nbuf);
3565 qdf_nbuf_free(ppdu_info->nbuf);
3566 qdf_mem_free(ppdu_info);
3567 }
3568}
3569
#if !defined(DISABLE_MON_CONFIG)

/**
 * dp_mon_ring_cleanup() - Release monitor rings of one mac
 * @soc: soc handle
 * @pdev: physical device handle
 * @mac_id: mac ring index within the pdev
 *
 * Mirror of dp_mon_rings_setup(): when the target has a dedicated
 * monitor RXDMA engine (rxdma1_enable), all monitor rings (buf, dst,
 * status, desc) plus the err_dst ring are cleaned; otherwise only the
 * status and err_dst rings exist.
 *
 * Return: None
 */
static
void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
			 int mac_id)
{
	if (soc->wlan_cfg_ctx->rxdma1_enable) {
		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_buf_ring[mac_id],
				RXDMA_MONITOR_BUF, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_dst_ring[mac_id],
				RXDMA_MONITOR_DST, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_status_ring[mac_id],
				RXDMA_MONITOR_STATUS, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_desc_ring[mac_id],
				RXDMA_MONITOR_DESC, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_err_dst_ring[mac_id],
				RXDMA_DST, 0);
	} else {
		dp_srng_cleanup(soc,
				&pdev->rxdma_mon_status_ring[mac_id],
				RXDMA_MONITOR_STATUS, 0);

		dp_srng_cleanup(soc,
				&pdev->rxdma_err_dst_ring[mac_id],
				RXDMA_DST, 0);
	}

}
#else
/* Monitor mode compiled out: nothing to clean up */
static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
				int mac_id)
{
}
#endif
3613
/**
 * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
 *
 * @soc: soc handle
 * @pdev: datapath physical dev handle
 * @mac_id: mac number
 *
 * Return: None
 */
static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
			       int mac_id)
{
	/* Intentionally empty: monitor ring deinit is a no-op here */
}
3627
3628/**
3629 * dp_pdev_mem_reset() - Reset txrx pdev memory
3630 * @pdev: dp pdev handle
3631 *
3632 * Return: None
3633 */
3634static void dp_pdev_mem_reset(struct dp_pdev *pdev)
3635{
3636 uint16_t len = 0;
3637 uint8_t *dp_pdev_offset = (uint8_t *)pdev;
3638
3639 len = sizeof(struct dp_pdev) -
3640 offsetof(struct dp_pdev, pdev_deinit) -
3641 sizeof(pdev->pdev_deinit);
3642 dp_pdev_offset = dp_pdev_offset +
3643 offsetof(struct dp_pdev, pdev_deinit) +
3644 sizeof(pdev->pdev_deinit);
3645
3646 qdf_mem_zero(dp_pdev_offset, len);
3647}
3648
/**
 * dp_pdev_deinit() - Deinit txrx pdev
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force deinit
 *
 * Tears down everything attached to the pdev in dependency order:
 * event/tx subsystems, per-pdev SRNGs, rx/monitor paths, IPA, queued
 * invalid-peer frames, stats contexts — then zeroes the pdev memory
 * past the pdev_deinit guard so a later re-init starts clean.
 * Guarded against double execution via pdev->pdev_deinit.
 *
 * Return: None
 */
static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	int mac_id;

	/*
	 * Prevent double pdev deinitialization during radio detach
	 * execution .i.e. in the absence of any vdev
	 */
	if (pdev->pdev_deinit)
		return;

	pdev->pdev_deinit = 1;

	dp_wdi_event_detach(pdev);

	dp_tx_pdev_detach(pdev);

	/* Per-pdev TCL/completion rings exist only in this config */
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
			       TCL_DATA, pdev->pdev_id);
		dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
			       WBM2SW_RELEASE, pdev->pdev_id);
	}

	dp_pktlogmod_exit(pdev);

	dp_rx_pdev_detach(pdev);
	dp_rx_pdev_mon_detach(pdev);
	dp_neighbour_peers_detach(pdev);
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);

	dp_ipa_uc_detach(soc, pdev);

	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);

	/* Cleanup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
			       REO_DST, pdev->pdev_id);
	}

	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);

	dp_rxdma_ring_cleanup(soc, pdev);

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		dp_mon_ring_deinit(soc, pdev, mac_id);
		dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
			       RXDMA_DST, 0);
	}

	/* Free any invalid-peer MSDUs still queued on this pdev */
	curr_nbuf = pdev->invalid_peer_head_msdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}
	pdev->invalid_peer_head_msdu = NULL;
	pdev->invalid_peer_tail_msdu = NULL;

	dp_htt_ppdu_stats_detach(pdev);

	qdf_nbuf_free(pdev->sojourn_buf);

	dp_cal_client_detach(&pdev->cal_client_ctx);

	soc->pdev_count--;
	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
	qdf_mem_free(pdev->invalid_peer);
	qdf_mem_free(pdev->dp_txrx_handle);
	/* Must be last: wipes everything after the pdev_deinit guard */
	dp_pdev_mem_reset(pdev);
}
3732
3733/**
3734 * dp_pdev_deinit_wifi3() - Deinit txrx pdev
3735 * @txrx_pdev: Datapath PDEV handle
3736 * @force: Force deinit
3737 *
3738 * Return: None
3739 */
3740static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
3741{
phadiman449a2682019-02-20 14:00:00 +05303742 struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3743 struct dp_soc *soc = pdev->soc;
3744
3745 soc->dp_soc_reinit = TRUE;
3746
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303747 dp_pdev_deinit(txrx_pdev, force);
3748}
3749
/*
 * dp_pdev_detach() - Complete rest of pdev detach
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force deinit
 *
 * Frees the ring memories (dp_srng_cleanup) that dp_pdev_deinit() only
 * deinitialized, releases cached rx descriptor arrays when the soc is
 * mid re-init, clears the pdev slot in the soc and frees the pdev.
 *
 * Return: None
 */
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;
	int mac_id, mac_for_pdev;

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
				TCL_DATA, pdev->pdev_id);
		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
				WBM2SW_RELEASE, pdev->pdev_id);
	}

	dp_mon_link_free(pdev);

	/* Cleanup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
				REO_DST, pdev->pdev_id);
	}

	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		dp_mon_ring_cleanup(soc, pdev, mac_id);
		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
				RXDMA_DST, 0);
		/* During re-init the status/mon rx descriptor arrays are
		 * not reused and must be freed here
		 */
		if (dp_is_soc_reinit(soc)) {
			mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
							      pdev->pdev_id);
			rx_desc_pool = &soc->rx_desc_status[mac_for_pdev];
			dp_rx_desc_free_array(soc, rx_desc_pool);
			rx_desc_pool = &soc->rx_desc_mon[mac_for_pdev];
			dp_rx_desc_free_array(soc, rx_desc_pool);
		}
	}

	if (dp_is_soc_reinit(soc)) {
		rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id];
		dp_rx_desc_free_array(soc, rx_desc_pool);
	}

	soc->pdev_list[pdev->pdev_id] = NULL;
	qdf_mem_free(pdev);
}
3803
3804/*
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303805 * dp_pdev_detach_wifi3() - detach txrx pdev
3806 * @txrx_pdev: Datapath PDEV handle
3807 * @force: Force detach
3808 *
3809 * Return: None
3810 */
3811static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
3812{
3813 struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3814 struct dp_soc *soc = pdev->soc;
3815
phadimana1f79822019-02-15 15:02:37 +05303816 if (dp_is_soc_reinit(soc)) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303817 dp_pdev_detach(txrx_pdev, force);
3818 } else {
3819 dp_pdev_deinit(txrx_pdev, force);
3820 dp_pdev_detach(txrx_pdev, force);
3821 }
3822}
3823
/*
 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
 * @soc: DP SOC handle
 *
 * Under the freelist lock, drains every deferred REO descriptor node:
 * unmaps the hardware queue descriptor DMA, frees its backing memory
 * and the list node. Finally destroys the list and its spinlock.
 */
static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
			(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		/* Undo the DMA mapping before freeing the descriptor memory */
		qdf_mem_unmap_nbytes_single(soc->osdev,
					    rx_tid->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
3848
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303849/**
3850 * dp_soc_mem_reset() - Reset Dp Soc memory
3851 * @soc: DP handle
3852 *
3853 * Return: None
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003854 */
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05303855static void dp_soc_mem_reset(struct dp_soc *soc)
3856{
3857 uint16_t len = 0;
3858 uint8_t *dp_soc_offset = (uint8_t *)soc;
3859
3860 len = sizeof(struct dp_soc) -
3861 offsetof(struct dp_soc, dp_soc_reinit) -
3862 sizeof(soc->dp_soc_reinit);
3863 dp_soc_offset = dp_soc_offset +
3864 offsetof(struct dp_soc, dp_soc_reinit) +
3865 sizeof(soc->dp_soc_reinit);
3866
3867 qdf_mem_zero(dp_soc_offset, len);
3868}
3869
/**
 * dp_soc_deinit() - Deinitialize txrx SOC
 * @txrx_soc: Opaque DP SOC handle
 *
 * Deinitializes each attached pdev, flushes HTT stats work, detaches
 * peer tables, deinitializes every common soc SRNG, tears down locks
 * and the deferred REO freelist, then wipes soc memory past the
 * dp_soc_reinit guard.
 *
 * Return: None
 */
static void dp_soc_deinit(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	qdf_atomic_set(&soc->cmn_init_done, 0);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		if (soc->pdev_list[i])
			dp_pdev_deinit((struct cdp_pdev *)
				       soc->pdev_list[i], 1);
	}

	qdf_flush_work(&soc->htt_stats.work);
	qdf_disable_work(&soc->htt_stats.work);

	/* Free pending htt stats messages */
	qdf_nbuf_queue_free(&soc->htt_stats.msg);

	dp_reo_cmdlist_destroy(soc);

	dp_peer_find_detach(soc);

	/* Free the ring memories */
	/* Common rings */
	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	/* Tx data rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			dp_srng_deinit(soc, &soc->tcl_data_ring[i],
				       TCL_DATA, i);
			dp_srng_deinit(soc, &soc->tx_comp_ring[i],
				       WBM2SW_RELEASE, i);
		}
	}

	/* TCL command and status rings */
	dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);

	/* Rx data rings */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			/* TODO: Get number of rings and ring sizes
			 * from wlan_cfg
			 */
			dp_srng_deinit(soc, &soc->reo_dest_ring[i],
				       REO_DST, i);
		}
	}
	/* REO reinjection ring */
	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);

	/* Rx release ring */
	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);

	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);

	dp_soc_wds_detach(soc);

	qdf_spinlock_destroy(&soc->peer_ref_mutex);
	qdf_spinlock_destroy(&soc->htt_stats.lock);

	htt_soc_htc_dealloc(soc->htt_handle);

	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);

	/* NOTE(review): dp_reo_cmdlist_destroy() was already called earlier
	 * in this function; this second call appears redundant — confirm
	 * whether commands can be requeued between the two points before
	 * removing either call.
	 */
	dp_reo_cmdlist_destroy(soc);
	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
	dp_reo_desc_freelist_destroy(soc);

	qdf_spinlock_destroy(&soc->ast_lock);

	dp_soc_mem_reset(soc);
}
3962
3963/**
3964 * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
3965 * @txrx_soc: Opaque DP SOC handle
3966 *
3967 * Return: None
3968 */
3969static void dp_soc_deinit_wifi3(void *txrx_soc)
3970{
3971 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3972
3973 soc->dp_soc_reinit = 1;
3974 dp_soc_deinit(txrx_soc);
3975}
3976
/*
 * dp_soc_detach() - Detach rest of txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Releases the memory behind everything dp_soc_deinit() deinitialized:
 * remaining pdevs, all soc SRNG allocations, the link descriptor pool,
 * the HTT handle and the wlan cfg context — then frees the soc itself.
 *
 * Return: None
 */
static void dp_soc_detach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	qdf_atomic_set(&soc->cmn_init_done, 0);

	/* TBD: Call Tx and Rx cleanup functions to free buffers and
	 * SW descriptors
	 */

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		if (soc->pdev_list[i])
			dp_pdev_detach((struct cdp_pdev *)
				       soc->pdev_list[i], 1);
	}

	/* Free the ring memories */
	/* Common rings */
	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	dp_tx_soc_detach(soc);

	/* Tx data rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
					TCL_DATA, i);
			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
					WBM2SW_RELEASE, i);
		}
	}

	/* TCL command and status rings */
	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);

	/* Rx data rings */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			/* TODO: Get number of rings and ring sizes
			 * from wlan_cfg
			 */
			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
					REO_DST, i);
		}
	}
	/* REO reinjection ring */
	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);

	/* Rx release ring */
	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);

	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
	dp_hw_link_desc_pool_cleanup(soc);

	htt_soc_detach(soc->htt_handle);
	/* Detach finished: clear the re-init marker for the next attach */
	soc->dp_soc_reinit = 0;

	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);

	qdf_mem_free(soc);
}
4056
/*
 * dp_soc_detach_wifi3() - Detach txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * If the soc is already mid re-init (deinit ran earlier) only the detach
 * step remains; otherwise deinit first and then detach.
 *
 * Return: None
 */
static void dp_soc_detach_wifi3(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (!dp_is_soc_reinit(soc))
		dp_soc_deinit(txrx_soc);

	dp_soc_detach(txrx_soc);
}
4075
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004076#if !defined(DISABLE_MON_CONFIG)
/**
 * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
 * @soc: soc handle
 * @pdev: physical device handle
 * @mac_id: ring number
 * @mac_for_pdev: mac_id
 *
 * Sends one htt_srng_setup() message per monitor ring, bailing out on
 * the first failure. With rxdma1 enabled all four monitor rings
 * (buf/dst/status/desc) are configured; otherwise only the status ring.
 *
 * Return: non-zero for failure, zero for success
 */
static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
					struct dp_pdev *pdev,
					int mac_id,
					int mac_for_pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (soc->wlan_cfg_ctx->rxdma1_enable) {
		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_buf_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_BUF);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_dst_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_DST);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_status_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_STATUS);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_desc_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_DESC);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
			return status;
		}
	} else {
		/* rxdma1 disabled: only the status ring is used for monitor */
		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_status_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_STATUS);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
			return status;
		}
	}

	return status;
}
4148#else
/* Monitor config compiled out (DISABLE_MON_CONFIG): nothing to send */
static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
					struct dp_pdev *pdev,
					int mac_id,
					int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}
4156#endif
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004157
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004158/*
Yun Parkfde6b9e2017-06-26 17:13:11 -07004159 * dp_rxdma_ring_config() - configure the RX DMA rings
4160 *
4161 * This function is used to configure the MAC rings.
4162 * On MCL host provides buffers in Host2FW ring
4163 * FW refills (copies) buffers to the ring and updates
4164 * ring_idx in register
4165 *
4166 * @soc: data path SoC handle
4167 *
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07004168 * Return: zero on success, non-zero on failure
Yun Parkfde6b9e2017-06-26 17:13:11 -07004169 */
Dhanashri Atred4032ab2017-01-17 15:05:41 -08004170#ifdef QCA_HOST2FW_RXBUF_RING
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			bool dbs_enable = 0;
			/* One host2FW ring per mac; count comes from cfg */
			int max_mac_rings =
				wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);

			htt_srng_setup(soc->htt_handle, 0,
				       pdev->rx_refill_buf_ring.hal_srng,
				       RXDMA_BUF);

			/* Second refill ring is optional (e.g. IPA) */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, 0,
					pdev->rx_refill_buf_ring2.hal_srng,
					RXDMA_BUF);

			if (soc->cdp_soc.ol_ops->
			    is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
			}

			/* Without DBS only a single mac ring is used */
			if (dbs_enable) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("DBS enabled max_mac_rings %d"),
					  max_mac_rings);
			} else {
				max_mac_rings = 1;
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("DBS disabled, max_mac_rings %d"),
					  max_mac_rings);
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  FL("pdev_id %d max_mac_rings %d"),
				  pdev->pdev_id, max_mac_rings);

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("mac_id %d"), mac_for_pdev);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					       pdev->rx_mac_buf_ring[mac_id]
					       .hal_srng,
					       RXDMA_BUF);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					       pdev->rxdma_err_dst_ring[mac_id]
					       .hal_srng,
					       RXDMA_DST);

				/* Configure monitor mode rings */
				status = dp_mon_htt_srng_setup(soc, pdev,
							       mac_id,
							       mac_for_pdev);
				if (status != QDF_STATUS_SUCCESS) {
					dp_err("Failed to send htt monitor messages to target");
					return status;
				}

			}
		}
	}

	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
		       dp_service_mon_rings, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);
	soc->reap_timer_init = 1;
	return status;
}
4257#else
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08004258/* This is only for WIN */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	int mac_id;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/* NOTE(review): htt_srng_setup() return values are not checked on
	 * this path (unlike the QCA_HOST2FW_RXBUF_RING variant) — confirm
	 * whether failures should propagate.
	 */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (!pdev)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);

			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
#ifndef DISABLE_MON_CONFIG
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
				RXDMA_MONITOR_BUF);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
				RXDMA_MONITOR_DST);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
				RXDMA_MONITOR_STATUS);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
				RXDMA_MONITOR_DESC);
#endif
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
				RXDMA_DST);
		}
	}
	return status;
}
4297#endif
4298
Kiran Venkatappa07921612019-03-02 23:14:12 +05304299#ifdef NO_RX_PKT_HDR_TLV
4300static QDF_STATUS
4301dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4302{
4303 int i;
4304 int mac_id;
4305 struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
4306 QDF_STATUS status = QDF_STATUS_SUCCESS;
4307
4308 htt_tlv_filter.mpdu_start = 1;
4309 htt_tlv_filter.msdu_start = 1;
4310 htt_tlv_filter.mpdu_end = 1;
4311 htt_tlv_filter.msdu_end = 1;
4312 htt_tlv_filter.attention = 1;
4313 htt_tlv_filter.packet = 1;
4314 htt_tlv_filter.packet_header = 0;
4315
4316 htt_tlv_filter.ppdu_start = 0;
4317 htt_tlv_filter.ppdu_end = 0;
4318 htt_tlv_filter.ppdu_end_user_stats = 0;
4319 htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4320 htt_tlv_filter.ppdu_end_status_done = 0;
4321 htt_tlv_filter.enable_fp = 1;
4322 htt_tlv_filter.enable_md = 0;
4323 htt_tlv_filter.enable_md = 0;
4324 htt_tlv_filter.enable_mo = 0;
4325
4326 htt_tlv_filter.fp_mgmt_filter = 0;
4327 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
4328 htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
4329 FILTER_DATA_MCAST |
4330 FILTER_DATA_DATA);
4331 htt_tlv_filter.mo_mgmt_filter = 0;
4332 htt_tlv_filter.mo_ctrl_filter = 0;
4333 htt_tlv_filter.mo_data_filter = 0;
4334 htt_tlv_filter.md_data_filter = 0;
4335
4336 htt_tlv_filter.offset_valid = true;
4337
4338 htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
4339 /*Not subscribing rx_pkt_header*/
4340 htt_tlv_filter.rx_header_offset = 0;
4341 htt_tlv_filter.rx_mpdu_start_offset =
4342 HAL_RX_PKT_TLV_MPDU_START_OFFSET(soc->hal_soc);
4343 htt_tlv_filter.rx_mpdu_end_offset =
4344 HAL_RX_PKT_TLV_MPDU_END_OFFSET(soc->hal_soc);
4345 htt_tlv_filter.rx_msdu_start_offset =
4346 HAL_RX_PKT_TLV_MSDU_START_OFFSET(soc->hal_soc);
4347 htt_tlv_filter.rx_msdu_end_offset =
4348 HAL_RX_PKT_TLV_MSDU_END_OFFSET(soc->hal_soc);
4349 htt_tlv_filter.rx_attn_offset =
4350 HAL_RX_PKT_TLV_ATTN_OFFSET(soc->hal_soc);
4351
4352 for (i = 0; i < MAX_PDEV_CNT; i++) {
4353 struct dp_pdev *pdev = soc->pdev_list[i];
4354
4355 if (!pdev)
4356 continue;
4357
4358 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4359 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4360 pdev->pdev_id);
4361
4362 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4363 pdev->rx_refill_buf_ring.hal_srng,
4364 RXDMA_BUF, RX_BUFFER_SIZE,
4365 &htt_tlv_filter);
4366 }
4367 }
4368 return status;
4369}
4370#else
/* rx_pkt_header TLV compiled in: target uses its default ring selection
 * config, so there is nothing to send.
 */
static QDF_STATUS
dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
4376#endif
4377
/*
 * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @cdp_soc: Opaque Datapath SOC handle
 *
 * Attaches the HTT layer to the target, then pushes the RXDMA ring
 * configuration and ring-selection config; failures propagate to the
 * caller. On success, initializes soc stats and the HTT stats workqueue.
 *
 * Return: zero on success, non-zero on failure
 */
static QDF_STATUS
dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	htt_soc_attach_target(soc->htt_handle);

	status = dp_rxdma_ring_config(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt srng setup messages to target");
		return status;
	}

	status = dp_rxdma_ring_sel_cfg(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt ring config message to target");
		return status;
	}

	DP_STATS_INIT(soc);

	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return QDF_STATUS_SUCCESS;
}
4411
4412/*
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05304413 * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
4414 * @txrx_soc: Datapath SOC handle
4415 */
4416static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
4417{
4418 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4419 return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
4420}
Krunal Soni03ba0f52019-02-12 11:44:46 -08004421
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05304422/*
4423 * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
4424 * @txrx_soc: Datapath SOC handle
4425 * @nss_cfg: nss config
4426 */
4427static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
4428{
4429 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
Aniruddha Paul9d7dc272018-02-11 19:40:41 +05304430 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
4431
4432 wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
4433
4434 /*
4435 * TODO: masked out based on the per offloaded radio
4436 */
Aniruddha Paulc34164e2018-09-14 14:25:30 +05304437 switch (config) {
4438 case dp_nss_cfg_default:
4439 break;
4440 case dp_nss_cfg_dbdc:
4441 case dp_nss_cfg_dbtc:
Aniruddha Paul9d7dc272018-02-11 19:40:41 +05304442 wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
4443 wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
4444 wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
4445 wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
Aniruddha Paulc34164e2018-09-14 14:25:30 +05304446 break;
4447 default:
4448 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4449 "Invalid offload config %d", config);
Aniruddha Paul9d7dc272018-02-11 19:40:41 +05304450 }
4451
Aditya Sathishded018e2018-07-02 16:25:21 +05304452 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4453 FL("nss-wifi<0> nss config is enabled"));
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05304454}
Debasis Dasc39a68d2019-01-28 17:02:06 +05304455
/*
* dp_vdev_attach_wifi3() - attach txrx vdev
* @txrx_pdev: Datapath PDEV handle
* @vdev_mac_addr: MAC address of the virtual interface
* @vdev_id: VDEV Id
* @wlan_op_mode: VDEV operating mode
*
* Allocates and initializes a DP vdev. Monitor-mode vdevs take an early
* exit: they are recorded in pdev->monitor_vdev and are NOT linked into
* pdev->vdev_list (note dp_vdev_detach_wifi3 relies on this asymmetry).
*
* Return: DP VDEV handle on success, NULL on failure
*/
static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("DP VDEV memory allocation failed"));
		goto fail0;
	}

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->opmode = op_mode;
	vdev->osdev = soc->osdev;

	/* OSIF callbacks are populated later via dp_vdev_register_wifi3 */
	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_get_key = NULL;
	vdev->osif_rx_mon = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	/* drop unencrypted frames by default until security is configured */
	vdev->drop_unenc = 1;
	vdev->sec_type = cdp_sec_type_none;
#ifdef notyet
	vdev->filters_num = 0;
#endif

	qdf_mem_copy(
		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	TAILQ_INIT(&vdev->peer_list);

	/* In poll mode, (re)arm the interrupt-emulation timer for the first
	 * vdev of the pdev and for monitor vdevs.
	 */
	if ((soc->intr_mode == DP_INTR_POLL) &&
	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if ((pdev->vdev_count == 0) ||
		    (wlan_op_mode_monitor == vdev->opmode))
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}

	/* Monitor vdev: record and return early — it is not added to the
	 * pdev vdev list and gets no Tx/peer setup.
	 */
	if (wlan_op_mode_monitor == vdev->opmode) {
		pdev->monitor_vdev = vdev;
		return (struct cdp_vdev *)vdev;
	}

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;
	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
	vdev->prev_tx_enq_tstamp = 0;
	vdev->prev_rx_deliver_tstamp = 0;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	/* add this vdev into the pdev's list */
	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	pdev->vdev_count++;

	dp_tx_vdev_attach(vdev);

	/* LRO hash setup is a one-time per-pdev action, done when the
	 * first (non-monitor) vdev appears.
	 */
	if (pdev->vdev_count == 1)
		dp_lro_hash_setup(soc, pdev);

	dp_info("Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
	DP_STATS_INIT(vdev);

	/* STA vdev: create the self-peer (AP bss peer is created on assoc) */
	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
				     vdev->mac_addr.raw,
				     NULL);

	return (struct cdp_vdev *)vdev;

fail0:
	return NULL;
}
4551
/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @vdev_handle: Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @ctrl_vdev: UMAC vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Wires the OS-interface callbacks into the DP vdev and, in return,
 * publishes the DP transmit entry points (tx / tx_exception) into
 * @txrx_ops for the caller to use.
 *
 * Return: void
 */
static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
	struct ol_txrx_ops *txrx_ops)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	vdev->osif_vdev = osif_vdev;
	vdev->ctrl_vdev = ctrl_vdev;
	vdev->osif_rx = txrx_ops->rx.rx;
	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
	vdev->osif_get_key = txrx_ops->get_key;
	vdev->osif_rx_mon = txrx_ops->rx.mon;
	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
	vdev->tx_comp = txrx_ops->tx.tx_comp;
#ifdef notyet
#if ATH_SUPPORT_WAPI
	vdev->osif_check_wai = txrx_ops->rx.wai_check;
#endif
#endif
#ifdef UMAC_SUPPORT_PROXY_ARP
	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
#endif
	vdev->me_convert = txrx_ops->me_convert;

	/* TODO: Enable the following once Tx code is integrated */
	/* Mesh vdevs use the mesh-aware send path; all others the plain one */
	if (vdev->mesh_vdev)
		txrx_ops->tx.tx = dp_tx_send_mesh;
	else
		txrx_ops->tx.tx = dp_tx_send;

	txrx_ops->tx.tx_exception = dp_tx_send_exception;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		"DP Vdev Register success");
}
4596
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304597/**
4598 * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
4599 * @vdev: Datapath VDEV handle
Pavankumar Nandeshwar753eed32019-01-22 15:40:15 +05304600 * @unmap_only: Flag to indicate "only unmap"
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304601 *
4602 * Return: void
4603 */
Pavankumar Nandeshwar753eed32019-01-22 15:40:15 +05304604static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304605{
Pavankumar Nandeshwar753eed32019-01-22 15:40:15 +05304606 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304607 struct dp_pdev *pdev = vdev->pdev;
4608 struct dp_soc *soc = pdev->soc;
4609 struct dp_peer *peer;
4610 uint16_t *peer_ids;
Chaithanya Garrepallia5ad5822019-03-20 16:56:43 +05304611 struct dp_ast_entry *ase, *tmp_ase;
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304612 uint8_t i = 0, j = 0;
4613
4614 peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
4615 if (!peer_ids) {
4616 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4617 "DP alloc failure - unable to flush peers");
4618 return;
4619 }
4620
4621 qdf_spin_lock_bh(&soc->peer_ref_mutex);
4622 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4623 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4624 if (peer->peer_ids[i] != HTT_INVALID_PEER)
4625 if (j < soc->max_peers)
4626 peer_ids[j++] = peer->peer_ids[i];
4627 }
4628 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4629
Krunal Sonice2009b2018-12-06 16:38:34 -08004630 for (i = 0; i < j ; i++) {
Vinay Adella5dc55512019-02-07 18:02:15 +05304631 if (unmap_only) {
4632 peer = __dp_peer_find_by_id(soc, peer_ids[i]);
Pavankumar Nandeshwar753eed32019-01-22 15:40:15 +05304633
Vinay Adella5dc55512019-02-07 18:02:15 +05304634 if (peer) {
Chaithanya Garrepallia5ad5822019-03-20 16:56:43 +05304635 if (soc->is_peer_map_unmap_v2) {
4636 /* free AST entries of peer before
4637 * release peer reference
4638 */
4639 DP_PEER_ITERATE_ASE_LIST(peer, ase,
4640 tmp_ase) {
4641 dp_rx_peer_unmap_handler
4642 (soc, peer_ids[i],
4643 vdev->vdev_id,
4644 ase->mac_addr.raw,
4645 1);
4646 }
4647 }
Vinay Adella5dc55512019-02-07 18:02:15 +05304648 dp_rx_peer_unmap_handler(soc, peer_ids[i],
4649 vdev->vdev_id,
4650 peer->mac_addr.raw,
4651 0);
4652 }
4653 } else {
4654 peer = dp_peer_find_by_id(soc, peer_ids[i]);
4655
4656 if (peer) {
4657 dp_info("peer: %pM is getting flush",
4658 peer->mac_addr.raw);
4659
Chaithanya Garrepallia5ad5822019-03-20 16:56:43 +05304660 if (soc->is_peer_map_unmap_v2) {
4661 /* free AST entries of peer before
4662 * release peer reference
4663 */
4664 DP_PEER_ITERATE_ASE_LIST(peer, ase,
4665 tmp_ase) {
4666 dp_rx_peer_unmap_handler
4667 (soc, peer_ids[i],
4668 vdev->vdev_id,
4669 ase->mac_addr.raw,
4670 1);
4671 }
4672 }
Pavankumar Nandeshwar753eed32019-01-22 15:40:15 +05304673 dp_peer_delete_wifi3(peer, 0);
Vinay Adella5dc55512019-02-07 18:02:15 +05304674 /*
4675 * we need to call dp_peer_unref_del_find_by_id
4676 * to remove additional ref count incremented
4677 * by dp_peer_find_by_id() call.
4678 *
4679 * Hold the ref count while executing
4680 * dp_peer_delete_wifi3() call.
4681 *
4682 */
4683 dp_peer_unref_del_find_by_id(peer);
4684 dp_rx_peer_unmap_handler(soc, peer_ids[i],
4685 vdev->vdev_id,
4686 peer->mac_addr.raw, 0);
4687 }
Krunal Sonice2009b2018-12-06 16:38:34 -08004688 }
Krunal Sonice2009b2018-12-06 16:38:34 -08004689 }
Vinay Adella4ca1bf62018-02-26 11:03:05 +05304690
4691 qdf_mem_free(peer_ids);
4692
4693 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
4694 FL("Flushed peers for vdev object %pK "), vdev);
4695}
4696
/*
 * dp_vdev_detach_wifi3() - Detach txrx vdev
 * @vdev_handle: Datapath VDEV handle
 * @callback: Callback OL_IF on completion of detach
 * @cb_context: Callback context
 *
 * If peers are still attached, the delete is deferred: the request is
 * recorded in vdev->delete and the function returns without freeing
 * (the last peer's teardown is expected to complete it). Monitor vdevs
 * skip straight to the free, mirroring the early-exit in attach.
 */
static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
	ol_txrx_vdev_delete_cb callback, void *cb_context)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_neighbour_peer *temp_peer = NULL;

	/* preconditions */
	qdf_assert_always(vdev);
	pdev = vdev->pdev;
	soc = pdev->soc;

	/* monitor vdev was never put on pdev->vdev_list; just free it */
	if (wlan_op_mode_monitor == vdev->opmode)
		goto free_vdev;

	/* STA vdev owns a self/bss peer created at attach; delete it now */
	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);

	/*
	 * If Target is hung, flush all peers before detaching vdev
	 * this will free all references held due to missing
	 * unmap commands from Target
	 */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);

	/*
	 * Use peer_ref_mutex while accessing peer_list, in case
	 * a peer is in the process of being removed from the list.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/* check that the vdev has no peers allocated */
	if (!TAILQ_EMPTY(&vdev->peer_list)) {
		/* debug print - will be removed later */
		dp_warn("not deleting vdev object %pK (%pM) until deletion finishes for all its peers",
			vdev, vdev->mac_addr.raw);
		/* indicate that the vdev needs to be deleted */
		vdev->delete.pending = 1;
		vdev->delete.callback = callback;
		vdev->delete.context = cb_context;
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		return;
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	/* Purge neighbour (NAC) peers referencing this vdev. Without HW NAC
	 * monitor support none should exist — assert rather than remove.
	 */
	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	if (!soc->hw_nac_monitor_support) {
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
			      neighbour_peer_list_elem) {
			QDF_ASSERT(peer->vdev != vdev);
		}
	} else {
		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
				   neighbour_peer_list_elem, temp_peer) {
			if (peer->vdev == vdev) {
				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
					     neighbour_peer_list_elem);
				qdf_mem_free(peer);
			}
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	dp_tx_vdev_detach(vdev);
	/* remove the vdev from its parent pdev's list */
	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);

	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
free_vdev:
	qdf_mem_free(vdev);

	if (callback)
		callback(cb_context);
}
4783
/*
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Delete the AST entries belonging to a peer. Caller is expected to
 * hold soc->ast_lock (see usage in dp_peer_create_wifi3). The no-op
 * stub keeps callers unconditional when FEATURE_AST is disabled.
 */
#ifdef FEATURE_AST
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	/* safe iteration — dp_peer_del_ast unlinks the current entry */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);

	peer->self_ast_entry = NULL;
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif
4808
/*
 * dp_peer_can_reuse() - check for an existing bss peer that may be reused
 * @vdev: datapath vdev on which the peer is being (re)created
 * @peer_mac_addr: MAC address to look up
 *
 * Returns the existing peer WITH the hash-find reference still held when
 * it can be reused (caller owns that reference); otherwise releases the
 * reference and returns NULL. The WRAP build accepts any bss peer found
 * by the hash lookup, while the non-WRAP build additionally requires the
 * peer to belong to this same vdev_id.
 */
#if ATH_SUPPORT_WRAP
static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
						uint8_t *peer_mac_addr)
{
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
				      0, vdev->vdev_id);
	if (!peer)
		return NULL;

	if (peer->bss_peer)
		return peer;

	/* not reusable — drop the reference taken by hash_find */
	dp_peer_unref_delete(peer);
	return NULL;
}
#else
static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
						uint8_t *peer_mac_addr)
{
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
				      0, vdev->vdev_id);
	if (!peer)
		return NULL;

	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
		return peer;

	/* not reusable — drop the reference taken by hash_find */
	dp_peer_unref_delete(peer);
	return NULL;
}
#endif
4844
#ifdef FEATURE_AST
/*
 * dp_peer_ast_handle_roam_del() - delete a stale WDS AST entry on roam
 * @soc: datapath soc handle
 * @pdev: datapath pdev the peer is (re)appearing on
 * @peer_mac_addr: MAC address whose AST entry should be removed
 *
 * Called when a peer is being created but no peer object exists for the
 * MAC; a leftover next-hop (WDS) AST entry from before the roam is then
 * deleted. Lookup is per-pdev when the target supports AST override,
 * soc-wide otherwise.
 * NOTE(review): no #else stub is visible in this chunk — presumably the
 * FEATURE_AST=n variant lives elsewhere in the file; confirm.
 */
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
					       struct dp_pdev *pdev,
					       uint8_t *peer_mac_addr)
{
	struct dp_ast_entry *ast_entry;

	qdf_spin_lock_bh(&soc->ast_lock);
	if (soc->ast_override_support)
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
							    pdev->pdev_id);
	else
		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);

	/* only remove WDS (next_hop) entries not already being deleted */
	if (ast_entry && ast_entry->next_hop &&
	    !ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
}
#endif
4866
/*
 * dp_peer_rx_bufq_resources_init() - set up the peer's cached-Rx queue
 * @peer: datapath peer being created
 *
 * When PEER_CACHE_RX_PKTS is enabled, initializes the lock, threshold
 * and list used to cache Rx packets for the peer; otherwise a no-op.
 */
#ifdef PEER_CACHE_RX_PKTS
static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
{
	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
}
#else
static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
{
}
#endif
4879
/*
 * dp_peer_create_wifi3() - attach txrx peer
 * @vdev_handle: Datapath VDEV handle
 * @peer_mac_addr: Peer MAC address
 * @ctrl_peer: UMAC (control-path) peer handle to associate
 *
 * Reuses an existing bss peer with the same MAC when possible (resetting
 * its state) and otherwise allocates and initializes a fresh peer,
 * including AST entry, peer-id table, vdev list membership, hash entry
 * and per-peer stats/rate-stats context.
 *
 * Return: DP peeer handle on success, NULL on failure
 */
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
	uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
{
	struct dp_peer *peer;
	int i;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct cdp_peer_cookie peer_cookie;
	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer_mac_addr);

	pdev = vdev->pdev;
	soc = pdev->soc;

	/*
	 * If a peer entry with given MAC address already exists,
	 * reuse the peer and reset the state of peer.
	 */
	peer = dp_peer_can_reuse(vdev, peer_mac_addr);

	if (peer) {
		/* --- reuse path: reset the recycled peer's state --- */
		qdf_atomic_init(&peer->is_default_route_set);
		dp_peer_cleanup(vdev, peer);

		/* drop stale AST entries under ast_lock before re-adding */
		qdf_spin_lock_bh(&soc->ast_lock);
		dp_peer_delete_ast_entries(soc, peer);
		peer->delete_in_progress = false;
		qdf_spin_unlock_bh(&soc->ast_lock);

		/* STA self-peer (MAC == vdev MAC) gets a SELF AST entry */
		if ((vdev->opmode == wlan_op_mode_sta) &&
		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE)) {
			ast_type = CDP_TXRX_AST_TYPE_SELF;
		}

		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);

		/*
		 * Control path maintains a node count which is incremented
		 * for every new peer create command. Since new peer is not being
		 * created and earlier reference is reused here,
		 * peer_unref_delete event is sent to control path to
		 * increment the count back.
		 */
		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
				peer->mac_addr.raw, vdev->mac_addr.raw,
				vdev->opmode, peer->ctrl_peer, ctrl_peer);
		}
		peer->ctrl_peer = ctrl_peer;

		dp_local_peer_id_alloc(pdev, peer);
		DP_STATS_INIT(peer);
		DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);

		return (void *)peer;
	} else {
		/*
		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
		 * need to remove the AST entry which was earlier added as a WDS
		 * entry.
		 * If an AST entry exists, but no peer entry exists with a given
		 * MAC addresses, we could deduce it as a WDS entry
		 */
		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
	}

	/* --- fresh-create path --- */
#ifdef notyet
	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
		soc->mempool_ol_ath_peer);
#else
	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
#endif

	if (!peer)
		return NULL; /* failure */

	qdf_mem_zero(peer, sizeof(struct dp_peer));

	TAILQ_INIT(&peer->ast_entry_list);

	/* store provided params */
	peer->vdev = vdev;
	peer->ctrl_peer = ctrl_peer;

	/* STA self-peer (MAC == vdev MAC) gets a SELF AST entry */
	if ((vdev->opmode == wlan_op_mode_sta) &&
	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
	     QDF_MAC_ADDR_SIZE)) {
		ast_type = CDP_TXRX_AST_TYPE_SELF;
	}

	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);

	qdf_spinlock_create(&peer->peer_info_lock);

	dp_peer_rx_bufq_resources_init(peer);

	qdf_mem_copy(
		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);

	/* TODO: See of rx_opt_proc is really required */
	peer->rx_opt_proc = soc->rx_opt_proc;

	/* initialize the peer_id */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
		peer->peer_ids[i] = HTT_INVALID_PEER;

	qdf_spin_lock_bh(&soc->peer_ref_mutex);

	qdf_atomic_init(&peer->ref_cnt);

	/* keep one reference for attach */
	qdf_atomic_inc(&peer->ref_cnt);

	/* add this peer into the vdev's list */
	if (wlan_op_mode_sta == vdev->opmode)
		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
	else
		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);

	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	/* TODO: See if hash based search is required */
	dp_peer_find_hash_add(soc, peer);

	/* Initialize the peer state */
	peer->state = OL_TXRX_PEER_STATE_DISC;

	dp_info("vdev %pK created peer %pK (%pM) ref_cnt: %d",
		vdev, peer, peer->mac_addr.raw,
		qdf_atomic_read(&peer->ref_cnt));
	/*
	 * For every peer MAp message search and set if bss_peer
	 */
	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
		dp_info("vdev bss_peer!!");
		peer->bss_peer = 1;
		vdev->vap_bss_peer = peer;
	}
	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);

	peer->valid = 1;
	dp_local_peer_id_alloc(pdev, peer);
	DP_STATS_INIT(peer);
	DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);

	/* Register the peer with the rate-stats module via a WDI event;
	 * the callee fills peer_cookie.ctx on success.
	 */
	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	peer_cookie.ctx = NULL;
	peer_cookie.cookie = pdev->next_peer_cookie++;
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
			     (void *)&peer_cookie,
			     peer->peer_ids[0], WDI_NO_VAL, pdev->pdev_id);
#endif
	if (soc->wlanstats_enabled) {
		if (!peer_cookie.ctx) {
			/* handler did not attach a context — reclaim cookie */
			pdev->next_peer_cookie--;
			qdf_err("Failed to initialize peer rate stats");
		} else {
			peer->wlanstats_ctx = (void *)peer_cookie.ctx;
		}
	}
	return (void *)peer;
}
5057
5058/*
Mohit Khanna81179cb2018-08-16 20:50:43 -07005059 * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5060 * @vdev: Datapath VDEV handle
5061 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5062 * @hash_based: pointer to hash value (enabled/disabled) to be populated
5063 *
5064 * Return: None
5065 */
5066static
5067void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5068 enum cdp_host_reo_dest_ring *reo_dest,
5069 bool *hash_based)
5070{
5071 struct dp_soc *soc;
5072 struct dp_pdev *pdev;
5073
5074 pdev = vdev->pdev;
5075 soc = pdev->soc;
5076 /*
5077 * hash based steering is disabled for Radios which are offloaded
5078 * to NSS
5079 */
5080 if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5081 *hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
5082
5083 /*
5084 * Below line of code will ensure the proper reo_dest ring is chosen
5085 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
5086 */
5087 *reo_dest = pdev->reo_dest;
5088}
5089
5090#ifdef IPA_OFFLOAD
5091/*
5092 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5093 * @vdev: Datapath VDEV handle
5094 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5095 * @hash_based: pointer to hash value (enabled/disabled) to be populated
5096 *
5097 * If IPA is enabled in ini, for SAP mode, disable hash based
5098 * steering, use default reo_dst ring for RX. Use config values for other modes.
5099 * Return: None
5100 */
5101static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5102 enum cdp_host_reo_dest_ring *reo_dest,
5103 bool *hash_based)
5104{
5105 struct dp_soc *soc;
5106 struct dp_pdev *pdev;
5107
5108 pdev = vdev->pdev;
5109 soc = pdev->soc;
5110
5111 dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5112
5113 /*
5114 * If IPA is enabled, disable hash-based flow steering and set
5115 * reo_dest_ring_4 as the REO ring to receive packets on.
5116 * IPA is configured to reap reo_dest_ring_4.
5117 *
5118 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
5119 * value enum value is from 1 - 4.
5120 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
5121 */
5122 if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5123 if (vdev->opmode == wlan_op_mode_ap) {
5124 *reo_dest = IPA_REO_DEST_RING_IDX + 1;
5125 *hash_based = 0;
5126 }
5127 }
5128}
5129
5130#else
5131
5132/*
5133 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5134 * @vdev: Datapath VDEV handle
5135 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5136 * @hash_based: pointer to hash value (enabled/disabled) to be populated
5137 *
5138 * Use system config values for hash based steering.
5139 * Return: None
5140 */
5141
5142static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5143 enum cdp_host_reo_dest_ring *reo_dest,
5144 bool *hash_based)
5145{
5146 dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5147}
5148#endif /* IPA_OFFLOAD */
5149
/*
 * dp_peer_setup_wifi3() - initialize the peer
 * @vdev_hdl: virtual device object
 * @peer_hdl: Peer object
 *
 * Picks the reo destination ring / hash-steering setting for the peer,
 * pushes the default routing to FW through the ol_ops callback and
 * initializes the peer's rx state.
 *
 * Return: void
 */
static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
{
	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	bool hash_based = 0;
	enum cdp_host_reo_dest_ring reo_dest;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer);

	pdev = vdev->pdev;
	soc = pdev->soc;

	/* reset management-frame bookkeeping for a fresh association */
	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;

	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);

	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
		pdev->pdev_id, vdev->vdev_id,
		vdev->opmode, hash_based, reo_dest);


	/*
	 * There are corner cases where the AD1 = AD2 = "VAPs address"
	 * i.e both the devices have same MAC address. In these
	 * cases we want such pkts to be processed in NULL Q handler
	 * which is REO2TCL ring. for this reason we should
	 * not setup reo_queues and default route for bss_peer.
	 */
	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
		return;

	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
		/* TODO: Check the destination ring number to be passed to FW */
		soc->cdp_soc.ol_ops->peer_set_default_routing(
			pdev->ctrl_pdev, peer->mac_addr.raw,
			peer->vdev->vdev_id, hash_based, reo_dest);
	}

	/* mark routing as programmed so later paths know FW was told */
	qdf_atomic_set(&peer->is_default_route_set, 1);

	dp_peer_rx_init(pdev, peer);
	return;
}
5206
/*
 * dp_cp_peer_del_resp_handler - Handle the peer delete response
 * @soc_hdl: Datapath SOC handle
 * @vdev_hdl: virtual device object
 * @mac_addr: Mac address of the peer
 *
 * Finalizes deletion of an AST entry whose delete was already in progress:
 * removes it from the AST hash/table, invokes the registered free callback
 * (outside the ast_lock) and frees the entry.
 *
 * Return: void
 */
static void dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
					struct cdp_vdev *vdev_hdl,
					uint8_t *mac_addr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
	txrx_ast_free_cb cb = NULL;
	void *cookie;

	qdf_spin_lock_bh(&soc->ast_lock);

	/* lookup scope depends on whether AST override is supported:
	 * per-pdev search vs soc-wide search
	 */
	if (soc->ast_override_support)
		ast_entry =
			dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
							vdev->pdev->pdev_id);
	else
		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);

	/* in case of qwrap we have multiple BSS peers
	 * with same mac address
	 *
	 * AST entry for this mac address will be created
	 * only for one peer hence it will be NULL here
	 */
	if (!ast_entry || ast_entry->peer || !ast_entry->delete_in_progress) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return;
	}

	/* drop the hw-index reference before removing from the hash */
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);

	/* capture callback/cookie so they can be invoked after unlock */
	cb = ast_entry->callback;
	cookie = ast_entry->cookie;
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	soc->num_ast_entries--;
	qdf_spin_unlock_bh(&soc->ast_lock);

	/* notify the owner outside the ast_lock */
	if (cb) {
		cb(soc->ctrl_psoc,
		   soc,
		   cookie,
		   CDP_TXRX_AST_DELETED);
	}
	qdf_mem_free(ast_entry);
}
5267
5268/*
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05305269 * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
5270 * @vdev_handle: virtual device object
5271 * @htt_pkt_type: type of pkt
5272 *
5273 * Return: void
5274 */
5275static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
5276 enum htt_cmn_pkt_type val)
5277{
5278 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5279 vdev->tx_encap_type = val;
5280}
5281
5282/*
5283 * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
5284 * @vdev_handle: virtual device object
5285 * @htt_pkt_type: type of pkt
5286 *
5287 * Return: void
5288 */
5289static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
5290 enum htt_cmn_pkt_type val)
5291{
5292 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5293 vdev->rx_decap_type = val;
5294}
5295
5296/*
sumedh baikady1f8f3192018-02-20 17:30:32 -08005297 * dp_set_ba_aging_timeout() - set ba aging timeout per AC
5298 * @txrx_soc: cdp soc handle
5299 * @ac: Access category
5300 * @value: timeout value in millisec
5301 *
5302 * Return: void
5303 */
5304static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5305 uint8_t ac, uint32_t value)
5306{
5307 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5308
5309 hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
5310}
5311
5312/*
5313 * dp_get_ba_aging_timeout() - get ba aging timeout per AC
5314 * @txrx_soc: cdp soc handle
5315 * @ac: access category
5316 * @value: timeout value in millisec
5317 *
5318 * Return: void
5319 */
5320static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5321 uint8_t ac, uint32_t *value)
5322{
5323 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5324
5325 hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
5326}
5327
5328/*
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05305329 * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
5330 * @pdev_handle: physical device object
5331 * @val: reo destination ring index (1 - 4)
5332 *
5333 * Return: void
5334 */
5335static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
5336 enum cdp_host_reo_dest_ring val)
5337{
5338 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5339
5340 if (pdev)
5341 pdev->reo_dest = val;
5342}
5343
5344/*
5345 * dp_get_pdev_reo_dest() - get the reo destination for this pdev
5346 * @pdev_handle: physical device object
5347 *
5348 * Return: reo destination ring index
5349 */
5350static enum cdp_host_reo_dest_ring
5351dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
5352{
5353 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5354
5355 if (pdev)
5356 return pdev->reo_dest;
5357 else
5358 return cdp_host_reo_dest_ring_unknown;
5359}
5360
5361/*
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05305362 * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
5363 * @pdev_handle: device object
5364 * @val: value to be set
5365 *
5366 * Return: void
5367 */
5368static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
5369 uint32_t val)
5370{
5371 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5372
5373 /* Enable/Disable smart mesh filtering. This flag will be checked
5374 * during rx processing to check if packets are from NAC clients.
5375 */
5376 pdev->filter_neighbour_peers = val;
5377 return 0;
5378}
5379
/*
 * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
 * address for smart mesh filtering
 * @vdev_handle: virtual device object
 * @cmd: Add/Del command
 * @macaddr: nac client mac address
 *
 * Return: 1 on success, 0 on failure (NULL mac or allocation failure)
 */
static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
					    uint32_t cmd, uint8_t *macaddr)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_neighbour_peer *peer = NULL;

	if (!macaddr)
		goto fail0;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
				sizeof(*peer));

		if (!peer) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("DP neighbour peer node memory allocation failed"));
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			     macaddr, QDF_MAC_ADDR_SIZE);
		peer->vdev = vdev;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);

		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
				  neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		/* first neighbour: turn on the ppdu status ring config */
		if (!pdev->neighbour_peers_added) {
			pdev->neighbour_peers_added = true;
			dp_ppdu_ring_cfg(pdev);
		}
		return 1;

	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
			      neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
					 macaddr, QDF_MAC_ADDR_SIZE)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&pdev->neighbour_peers_list,
					     peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}
		/* last neighbour deleted: undo the ring config */
		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
			pdev->neighbour_peers_added = false;
			dp_ppdu_ring_cfg(pdev);
		}

		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		/* reset the ppdu ring only if no other feature needs it */
		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
		    !pdev->enhanced_stats_en)
			dp_ppdu_ring_reset(pdev);
		return 1;

	}

fail0:
	return 0;
}
5461
5462/*
Chaitanya Kiran Godavarthi6228e3b2017-06-15 14:28:19 +05305463 * dp_get_sec_type() - Get the security type
5464 * @peer: Datapath peer handle
5465 * @sec_idx: Security id (mcast, ucast)
5466 *
5467 * return sec_type: Security type
5468 */
5469static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
5470{
5471 struct dp_peer *dpeer = (struct dp_peer *)peer;
5472
5473 return dpeer->security[sec_idx].sec_type;
5474}
5475
5476/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07005477 * dp_peer_authorize() - authorize txrx peer
5478 * @peer_handle: Datapath peer handle
5479 * @authorize
5480 *
5481 */
c_cgodavbd5b3c22017-06-07 12:31:40 +05305482static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07005483{
5484 struct dp_peer *peer = (struct dp_peer *)peer_handle;
5485 struct dp_soc *soc;
5486
Jeff Johnsona8edf332019-03-18 09:51:52 -07005487 if (peer) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07005488 soc = peer->vdev->pdev->soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07005489 qdf_spin_lock_bh(&soc->peer_ref_mutex);
5490 peer->authorize = authorize ? 1 : 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07005491 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5492 }
5493}
5494
Krunal Soni7c4565f2018-09-04 19:02:53 -07005495static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
5496 struct dp_pdev *pdev,
5497 struct dp_peer *peer,
Om Prakash Tripathibf529e52019-04-11 17:23:57 +05305498 struct dp_vdev *vdev)
Krunal Soni7c4565f2018-09-04 19:02:53 -07005499{
Krunal Soni7c4565f2018-09-04 19:02:53 -07005500 struct dp_peer *bss_peer = NULL;
5501 uint8_t *m_addr = NULL;
5502
Krunal Soni7c4565f2018-09-04 19:02:53 -07005503 if (!vdev) {
5504 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5505 "vdev is NULL");
5506 } else {
5507 if (vdev->vap_bss_peer == peer)
5508 vdev->vap_bss_peer = NULL;
5509 m_addr = peer->mac_addr.raw;
5510 if (soc->cdp_soc.ol_ops->peer_unref_delete)
Vinay Adella94201152018-12-03 19:02:58 +05305511 soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
Pavankumar Nandeshwar2702aee2018-12-20 18:57:12 +05305512 m_addr, vdev->mac_addr.raw, vdev->opmode,
5513 peer->ctrl_peer, NULL);
Vinay Adella94201152018-12-03 19:02:58 +05305514
Krunal Soni7c4565f2018-09-04 19:02:53 -07005515 if (vdev && vdev->vap_bss_peer) {
5516 bss_peer = vdev->vap_bss_peer;
5517 DP_UPDATE_STATS(vdev, peer);
5518 }
5519 }
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05305520 /*
5521 * Peer AST list hast to be empty here
5522 */
5523 DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
5524
Krunal Soni7c4565f2018-09-04 19:02:53 -07005525 qdf_mem_free(peer);
5526}
5527
/**
 * dp_delete_pending_vdev() - check and process vdev delete
 * @pdev: DP specific pdev pointer
 * @vdev: DP specific vdev pointer
 * @vdev_id: vdev id corresponding to vdev
 *
 * This API does following:
 * 1) It releases tx flow pools buffers as vdev is
 *    going down and no peers are associated.
 * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory
 */
static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
				   uint8_t vdev_id)
{
	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
	void *vdev_delete_context = NULL;

	/* capture the delete callback/context before the vdev is freed */
	vdev_delete_cb = vdev->delete.callback;
	vdev_delete_context = vdev->delete.context;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("deleting vdev object %pK (%pM)- its last peer is done"),
		  vdev, vdev->mac_addr.raw);
	/* all peers are gone, go ahead and delete it */
	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
				      FLOW_TYPE_VDEV, vdev_id);
	dp_tx_vdev_detach(vdev);

	/* unlink from the pdev's vdev list under vdev_list_lock */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("deleting vdev object %pK (%pM)"),
		  vdev, vdev->mac_addr.raw);
	qdf_mem_free(vdev);
	vdev = NULL;

	/* notify the control path only after the vdev is fully torn down */
	if (vdev_delete_cb)
		vdev_delete_cb(vdev_delete_context);
}
5569
/*
 * dp_peer_unref_delete() - unref and delete peer
 * @peer_handle: Datapath peer handle
 *
 * Drops one reference on the peer. When the count reaches zero the peer is
 * removed from the id map, hash table and vdev peer list, a PEER_DESTROY
 * WDI event is raised, the peer memory is released, and — if this was the
 * vdev's last peer and a vdev delete was pending — the vdev is deleted too.
 *
 * Return: void
 */
void dp_peer_unref_delete(void *peer_handle)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_vdev *vdev = peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_peer *tmppeer;
	int found = 0;
	uint16_t peer_id;
	uint16_t vdev_id;
	bool delete_vdev;
	struct cdp_peer_cookie peer_cookie;

	/*
	 * Hold the lock all the way from checking if the peer ref count
	 * is zero until the peer references are removed from the hash
	 * table and vdev list (if the peer ref count is zero).
	 * This protects against a new HL tx operation starting to use the
	 * peer object just after this function concludes it's done being used.
	 * Furthermore, the lock needs to be held while checking whether the
	 * vdev's list of peers is empty, to make sure that list is not modified
	 * concurrently with the empty check.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
		peer_id = peer->peer_ids[0];
		vdev_id = vdev->vdev_id;

		/*
		 * Make sure that the reference to the peer in
		 * peer object map is removed
		 */
		if (peer_id != HTT_INVALID_PEER)
			soc->peer_id_to_obj_map[peer_id] = NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);

		/* remove the reference to the peer from the hash table */
		dp_peer_find_hash_remove(soc, peer);

		/* the self AST entry must go before the peer memory does */
		qdf_spin_lock_bh(&soc->ast_lock);
		if (peer->self_ast_entry) {
			dp_peer_del_ast(soc, peer->self_ast_entry);
			peer->self_ast_entry = NULL;
		}
		qdf_spin_unlock_bh(&soc->ast_lock);

		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
			if (tmppeer == peer) {
				found = 1;
				break;
			}
		}

		if (found) {
			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
				     peer_list_elem);
		} else {
			/*Ignoring the remove operation as peer not found*/
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  "peer:%pK not found in vdev:%pK peerlist:%pK",
				  peer, vdev, &peer->vdev->peer_list);
		}

		/* send peer destroy event to upper layer */
		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		peer_cookie.ctx = NULL;
		peer_cookie.ctx = (void *)peer->wlanstats_ctx;
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
				     pdev->soc,
				     (void *)&peer_cookie,
				     peer->peer_ids[0],
				     WDI_NO_VAL,
				     pdev->pdev_id);
#endif
		peer->wlanstats_ctx = NULL;

		/* cleanup the peer data */
		dp_peer_cleanup(vdev, peer);
		/* frees the peer; do not touch 'peer' after this call */
		dp_reset_and_release_peer_mem(soc, pdev, peer, vdev);

		/* check whether the parent vdev has no peers left */
		if (TAILQ_EMPTY(&vdev->peer_list)) {
			/*
			 * capture vdev delete pending flag's status
			 * while holding peer_ref_mutex lock
			 */
			delete_vdev = vdev->delete.pending;
			/*
			 * Now that there are no references to the peer, we can
			 * release the peer reference lock.
			 */
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			/*
			 * Check if the parent vdev was waiting for its peers
			 * to be deleted, in order for it to be deleted too.
			 */
			if (delete_vdev)
				dp_delete_pending_vdev(pdev, vdev, vdev_id);
		} else {
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		}

	} else {
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	}
}
5685
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_peer_rx_bufq_resources_deinit() - tear down the peer's cached rx bufq
 * @peer: Datapath peer handle
 *
 * Destroys the cached_bufq list and the spinlock guarding it.
 * Compiled out (no-op) when PEER_CACHE_RX_PKTS is not defined.
 *
 * Return: void
 */
static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
{
	qdf_list_destroy(&peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
}
#else
static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
{
}
#endif
5697
/*
 * dp_peer_delete_wifi3() - Detach txrx peer
 * @peer_handle: Datapath peer handle
 * @bitmap: bitmap indicating special handling of request.
 *
 * Return: void
 */
static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	/* redirect the peer's rx delivery function to point to a
	 * discard func
	 */

	peer->rx_opt_proc = dp_rx_discard;

	/* Do not make ctrl_peer to NULL for connected sta peers.
	 * We need ctrl_peer to release the reference during dp
	 * peer free. This reference was held for
	 * obj_mgr peer during the creation of dp peer.
	 */
	if (!(peer->vdev && (peer->vdev->opmode != wlan_op_mode_sta) &&
	      !peer->bss_peer))
		peer->ctrl_peer = NULL;

	/* mark invalid so in-flight lookups stop handing this peer out */
	peer->valid = 0;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);

	dp_local_peer_id_free(peer->vdev->pdev, peer);
	qdf_spinlock_destroy(&peer->peer_info_lock);

	dp_peer_rx_bufq_resources_deinit(peer);

	/*
	 * Remove the reference added during peer_attach.
	 * The peer will still be left allocated until the
	 * PEER_UNMAP message arrives to remove the other
	 * reference, added by the PEER_MAP message.
	 */
	dp_peer_unref_delete(peer_handle);
}
5741
/*
 * dp_get_vdev_mac_addr_wifi3() - get the MAC address of a vdev
 * @pvdev: virtual device object
 *
 * Return: pointer to the vdev's raw MAC address bytes
 * (original header comment wrongly said "Detach txrx peer")
 */
static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
{
	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
	return vdev->mac_addr.raw;
}
5752
5753/*
Karunakar Dasinenica792542017-01-16 10:08:58 -08005754 * dp_vdev_set_wds() - Enable per packet stats
5755 * @vdev_handle: DP VDEV handle
5756 * @val: value
5757 *
5758 * Return: none
5759 */
5760static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
5761{
5762 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5763
5764 vdev->wds_enabled = val;
5765 return 0;
5766}
5767
5768/*
Leo Chang5ea93a42016-11-03 12:39:49 -07005769 * dp_get_vdev_from_vdev_id_wifi3() – Detach txrx peer
5770 * @peer_handle: Datapath peer handle
5771 *
5772 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005773static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
5774 uint8_t vdev_id)
Leo Chang5ea93a42016-11-03 12:39:49 -07005775{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005776 struct dp_pdev *pdev = (struct dp_pdev *)dev;
Leo Chang5ea93a42016-11-03 12:39:49 -07005777 struct dp_vdev *vdev = NULL;
5778
5779 if (qdf_unlikely(!pdev))
5780 return NULL;
5781
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05305782 qdf_spin_lock_bh(&pdev->vdev_list_lock);
Leo Chang5ea93a42016-11-03 12:39:49 -07005783 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
Mohit Khanna02553142019-04-11 17:49:27 -07005784 if (vdev->delete.pending)
5785 continue;
5786
Leo Chang5ea93a42016-11-03 12:39:49 -07005787 if (vdev->vdev_id == vdev_id)
5788 break;
5789 }
Anish Nataraj4c9a4ed2018-04-08 21:25:13 +05305790 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
Leo Chang5ea93a42016-11-03 12:39:49 -07005791
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005792 return (struct cdp_vdev *)vdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07005793}
5794
chenguo2a733792018-11-01 16:10:38 +08005795/*
5796 * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
5797 * @dev: PDEV handle
5798 *
5799 * Return: VDEV handle of monitor mode
5800 */
5801
5802static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
5803{
5804 struct dp_pdev *pdev = (struct dp_pdev *)dev;
5805
5806 if (qdf_unlikely(!pdev))
5807 return NULL;
5808
5809 return (struct cdp_vdev *)pdev->monitor_vdev;
5810}
5811
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005812static int dp_get_opmode(struct cdp_vdev *vdev_handle)
Leo Chang5ea93a42016-11-03 12:39:49 -07005813{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005814 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Leo Chang5ea93a42016-11-03 12:39:49 -07005815
5816 return vdev->opmode;
5817}
5818
Mohit Khanna7ac554b2018-05-24 11:58:13 -07005819static
5820void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
5821 ol_txrx_rx_fp *stack_fn_p,
5822 ol_osif_vdev_handle *osif_vdev_p)
5823{
5824 struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
5825
5826 qdf_assert(vdev);
5827 *stack_fn_p = vdev->osif_rx_stack;
5828 *osif_vdev_p = vdev->osif_vdev;
5829}
5830
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005831static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
Leo Chang5ea93a42016-11-03 12:39:49 -07005832{
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005833 struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
Leo Chang5ea93a42016-11-03 12:39:49 -07005834 struct dp_pdev *pdev = vdev->pdev;
5835
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08005836 return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
Leo Chang5ea93a42016-11-03 12:39:49 -07005837}
phadiman7821bf82018-02-06 16:03:54 +05305838
Kai Chen6eca1a62017-01-12 10:17:53 -08005839/**
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07005840 * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
5841 * ring based on target
5842 * @soc: soc handle
5843 * @mac_for_pdev: pdev_id
5844 * @pdev: physical device handle
5845 * @ring_num: mac id
5846 * @htt_tlv_filter: tlv filter
5847 *
5848 * Return: zero on success, non-zero on failure
5849 */
5850static inline
5851QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
5852 struct dp_pdev *pdev, uint8_t ring_num,
5853 struct htt_rx_ring_tlv_filter htt_tlv_filter)
5854{
5855 QDF_STATUS status;
5856
5857 if (soc->wlan_cfg_ctx->rxdma1_enable)
5858 status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5859 pdev->rxdma_mon_buf_ring[ring_num]
5860 .hal_srng,
5861 RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
5862 &htt_tlv_filter);
5863 else
5864 status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
5865 pdev->rx_mac_buf_ring[ring_num]
5866 .hal_srng,
5867 RXDMA_BUF, RX_BUFFER_SIZE,
5868 &htt_tlv_filter);
5869
5870 return status;
5871}
5872
5873/**
sumedh baikady84613b02017-09-19 16:36:14 -07005874 * dp_reset_monitor_mode() - Disable monitor mode
5875 * @pdev_handle: Datapath PDEV handle
5876 *
Kai Chen52ef33f2019-03-05 18:33:40 -08005877 * Return: QDF_STATUS
sumedh baikady84613b02017-09-19 16:36:14 -07005878 */
Kai Chen52ef33f2019-03-05 18:33:40 -08005879QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
sumedh baikady84613b02017-09-19 16:36:14 -07005880{
5881 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5882 struct htt_rx_ring_tlv_filter htt_tlv_filter;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005883 struct dp_soc *soc = pdev->soc;
sumedh baikady84613b02017-09-19 16:36:14 -07005884 uint8_t pdev_id;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005885 int mac_id;
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07005886 QDF_STATUS status = QDF_STATUS_SUCCESS;
sumedh baikady84613b02017-09-19 16:36:14 -07005887
5888 pdev_id = pdev->pdev_id;
5889 soc = pdev->soc;
5890
Sumedh Baikady12b2b2c2018-03-05 16:50:58 -08005891 qdf_spin_lock_bh(&pdev->mon_lock);
5892
hangtianfe681a52019-01-16 17:16:28 +08005893 qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
sumedh baikady84613b02017-09-19 16:36:14 -07005894
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005895 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
5896 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
sumedh baikady84613b02017-09-19 16:36:14 -07005897
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07005898 status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
5899 pdev, mac_id,
5900 htt_tlv_filter);
5901
5902 if (status != QDF_STATUS_SUCCESS) {
5903 dp_err("Failed to send tlv filter for monitor mode rings");
5904 return status;
5905 }
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005906
5907 htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07005908 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
5909 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
5910 &htt_tlv_filter);
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08005911 }
sumedh baikady84613b02017-09-19 16:36:14 -07005912
Sumedh Baikady12b2b2c2018-03-05 16:50:58 -08005913 pdev->monitor_vdev = NULL;
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05305914 pdev->mcopy_mode = 0;
Chaithanya Garrepalli65e6fc12018-12-21 19:17:33 +05305915 pdev->monitor_configured = false;
Sumedh Baikady12b2b2c2018-03-05 16:50:58 -08005916
5917 qdf_spin_unlock_bh(&pdev->mon_lock);
5918
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07005919 return QDF_STATUS_SUCCESS;
sumedh baikady84613b02017-09-19 16:36:14 -07005920}
phadiman7821bf82018-02-06 16:03:54 +05305921
5922/**
5923 * dp_set_nac() - set peer_nac
5924 * @peer_handle: Datapath PEER handle
5925 *
5926 * Return: void
5927 */
5928static void dp_set_nac(struct cdp_peer *peer_handle)
5929{
5930 struct dp_peer *peer = (struct dp_peer *)peer_handle;
5931
5932 peer->nac = 1;
5933}
5934
5935/**
5936 * dp_get_tx_pending() - read pending tx
5937 * @pdev_handle: Datapath PDEV handle
5938 *
5939 * Return: outstanding tx
5940 */
5941static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
5942{
5943 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5944
5945 return qdf_atomic_read(&pdev->num_tx_outstanding);
5946}
5947
5948/**
5949 * dp_get_peer_mac_from_peer_id() - get peer mac
5950 * @pdev_handle: Datapath PDEV handle
5951 * @peer_id: Peer ID
5952 * @peer_mac: MAC addr of PEER
5953 *
5954 * Return: void
5955 */
5956static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
5957 uint32_t peer_id, uint8_t *peer_mac)
5958{
5959 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5960 struct dp_peer *peer;
5961
5962 if (pdev && peer_mac) {
5963 peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05305964 if (peer) {
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07005965 qdf_mem_copy(peer_mac, peer->mac_addr.raw,
Srinivas Girigowda2751b6d2019-02-27 12:28:13 -08005966 QDF_MAC_ADDR_SIZE);
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05305967 dp_peer_unref_del_find_by_id(peer);
phadiman7821bf82018-02-06 16:03:54 +05305968 }
5969 }
5970}
5971
/**
 * dp_pdev_configure_monitor_rings() - configure monitor rings
 * @pdev: Datapath PDEV handle
 *
 * Programs, for every MAC behind this pdev, first the monitor
 * destination/buffer ring and then the monitor status ring with TLV
 * filters derived from the pdev's monitor settings (mon_filter_mode,
 * fp_*/mo_* filters, mcopy_mode, rx_enh_capture_mode).
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
{
	struct htt_rx_ring_tlv_filter htt_tlv_filter;
	struct dp_soc *soc;
	uint8_t pdev_id;
	int mac_id;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	soc = pdev->soc;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
		pdev->fp_ctrl_filter, pdev->fp_data_filter,
		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
		pdev->mo_data_filter);

	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));

	/* Pass 1: destination/buffer ring filter - per-MSDU TLVs and
	 * packet payload enabled, PPDU-level TLVs disabled.
	 */
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.header_per_msdu = 1;
	/* filter-pass / monitor-other enables come from mon_filter_mode */
	htt_tlv_filter.enable_fp =
		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo =
		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;

	/* In m_copy mode the data filters are forced off for this ring
	 * (the status ring below enables packet_header for mcopy instead)
	 */
	if (pdev->mcopy_mode) {
		htt_tlv_filter.fp_data_filter = 0;
		htt_tlv_filter.mo_data_filter = 0;
	} else {
		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
		htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
	}
	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
	htt_tlv_filter.offset_valid = false;

	/* Rx enhanced capture (MPDU / MPDU+MSDU): clear every fp/mo
	 * filter on the destination ring; capture is driven via the
	 * status-ring configuration below.
	 */
	if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
		htt_tlv_filter.fp_mgmt_filter = 0;
		htt_tlv_filter.fp_ctrl_filter = 0;
		htt_tlv_filter.fp_data_filter = 0;
		htt_tlv_filter.mo_mgmt_filter = 0;
		htt_tlv_filter.mo_ctrl_filter = 0;
		htt_tlv_filter.mo_data_filter = 0;
	}

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
						     pdev, mac_id,
						     htt_tlv_filter);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send tlv filter for monitor mode rings");
			return status;
		}
	}

	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));

	/* Pass 2: status ring filter - PPDU-level TLVs enabled, both
	 * filter-pass and monitor-other on, all frame types accepted.
	 */
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 0;
	htt_tlv_filter.packet = 0;
	htt_tlv_filter.msdu_end = 0;
	htt_tlv_filter.mpdu_end = 0;
	/* enhanced capture additionally needs the mpdu_end TLV */
	if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
		htt_tlv_filter.mpdu_end = 1;
	}
	htt_tlv_filter.attention = 0;
	htt_tlv_filter.ppdu_start = 1;
	htt_tlv_filter.ppdu_end = 1;
	htt_tlv_filter.ppdu_end_user_stats = 1;
	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
	htt_tlv_filter.ppdu_end_status_done = 1;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 1;
	/* mcopy / enhanced capture take packet headers from the status
	 * ring; per-MSDU headers only for the MPDU_MSDU capture mode.
	 */
	if (pdev->mcopy_mode ||
	    (pdev->rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
		htt_tlv_filter.packet_header = 1;
		if (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) {
			htt_tlv_filter.header_per_msdu = 0;
			htt_tlv_filter.enable_mo = 0;
		} else if (pdev->rx_enh_capture_mode ==
			   CDP_RX_ENH_CAPTURE_MPDU_MSDU) {
			htt_tlv_filter.header_per_msdu = 1;
			htt_tlv_filter.enable_mo = 0;
		}
	}

	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.offset_valid = false;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
						pdev->pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
	}

	return status;
}
6105
6106/**
6107 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
6108 * @vdev_handle: Datapath VDEV handle
6109 * @smart_monitor: Flag to denote if its smart monitor mode
6110 *
6111 * Return: 0 on success, not 0 on failure
6112 */
6113static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
Naga65fad662019-03-22 19:01:28 +05306114 uint8_t special_monitor)
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05306115{
6116 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6117 struct dp_pdev *pdev;
6118
6119 qdf_assert(vdev);
6120
6121 pdev = vdev->pdev;
Naga65fad662019-03-22 19:01:28 +05306122 pdev->monitor_vdev = vdev;
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05306123 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6124 "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
6125 pdev, pdev->pdev_id, pdev->soc, vdev);
6126
Naga65fad662019-03-22 19:01:28 +05306127 /*
6128 * do not configure monitor buf ring and filter for smart and
6129 * lite monitor
6130 * for smart monitor filters are added along with first NAC
6131 * for lite monitor required configuration done through
6132 * dp_set_pdev_param
6133 */
6134 if (special_monitor)
6135 return QDF_STATUS_SUCCESS;
6136
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05306137 /*Check if current pdev's monitor_vdev exists */
Chaithanya Garrepalli65e6fc12018-12-21 19:17:33 +05306138 if (pdev->monitor_configured) {
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05306139 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6140 "monitor vap already created vdev=%pK\n", vdev);
6141 qdf_assert(vdev);
6142 return QDF_STATUS_E_RESOURCES;
6143 }
6144
Chaithanya Garrepalli65e6fc12018-12-21 19:17:33 +05306145 pdev->monitor_configured = true;
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05306146
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05306147 return dp_pdev_configure_monitor_rings(pdev);
nobeljd124b742017-10-16 11:59:12 -07006148}
6149
/**
 * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
 * @pdev_handle: Datapath PDEV handle
 * @filter_val: Flag to select Filter for monitor mode
 *
 * Stores the requested filter set in the pdev, disables the monitor
 * rings with an all-zero filter, then reprograms the destination ring
 * and the status ring with the new filters.
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
	struct cdp_monitor_filter *filter_val)
{
	/* Many monitor VAPs can exists in a system but only one can be up at
	 * anytime
	 */
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = pdev->monitor_vdev;
	struct htt_rx_ring_tlv_filter htt_tlv_filter;
	struct dp_soc *soc;
	uint8_t pdev_id;
	int mac_id;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	soc = pdev->soc;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
		pdev, pdev_id, soc, vdev);

	/*Check if current pdev's monitor_vdev exists */
	if (!pdev->monitor_vdev) {
		/* only logs/asserts; execution continues regardless */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"vdev=%pK", vdev);
		qdf_assert(vdev);
	}

	/* update filter mode, type in pdev structure */
	pdev->mon_filter_mode = filter_val->mode;
	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
	pdev->fp_data_filter = filter_val->fp_data;
	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
	pdev->mo_data_filter = filter_val->mo_data;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
		pdev->fp_ctrl_filter, pdev->fp_data_filter,
		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
		pdev->mo_data_filter);

	/* Pass 1: all-zero filter disables both the destination and
	 * the status rings before the new configuration is applied.
	 */
	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
						     pdev, mac_id,
						     htt_tlv_filter);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send tlv filter for monitor mode rings");
			return status;
		}

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
	}

	/* Pass 2: destination/buffer ring filter built from the newly
	 * stored pdev filter values (htt_tlv_filter is still zeroed
	 * apart from the fields set below).
	 */
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.header_per_msdu = 1;
	htt_tlv_filter.enable_fp =
		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo =
		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
	/* m_copy mode forces the filter-pass data filter off here */
	if (pdev->mcopy_mode)
		htt_tlv_filter.fp_data_filter = 0;
	else
		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
	htt_tlv_filter.offset_valid = false;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
						     pdev, mac_id,
						     htt_tlv_filter);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send tlv filter for monitor mode rings");
			return status;
		}
	}

	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));

	/* Pass 3: status ring filter - PPDU-level TLVs, all frame types */
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 0;
	htt_tlv_filter.packet = 0;
	htt_tlv_filter.msdu_end = 0;
	htt_tlv_filter.mpdu_end = 0;
	htt_tlv_filter.attention = 0;
	htt_tlv_filter.ppdu_start = 1;
	htt_tlv_filter.ppdu_end = 1;
	htt_tlv_filter.ppdu_end_user_stats = 1;
	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
	htt_tlv_filter.ppdu_end_status_done = 1;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 1;
	/* mcopy takes packet headers from the status ring */
	if (pdev->mcopy_mode) {
		htt_tlv_filter.packet_header = 1;
	}
	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
	htt_tlv_filter.offset_valid = false;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
						pdev->pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
	}

	return QDF_STATUS_SUCCESS;
}
Leo Chang5ea93a42016-11-03 12:39:49 -07006300
nobeljc8eb4d62018-01-04 14:29:32 -08006301/**
phadiman7821bf82018-02-06 16:03:54 +05306302 * dp_get_pdev_id_frm_pdev() - get pdev_id
6303 * @pdev_handle: Datapath PDEV handle
6304 *
6305 * Return: pdev_id
6306 */
6307static
6308uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
6309{
6310 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6311
6312 return pdev->pdev_id;
6313}
6314
6315/**
Varsha Mishraa331e6e2019-03-11 12:16:14 +05306316 * dp_get_delay_stats_flag() - get delay stats flag
6317 * @pdev_handle: Datapath PDEV handle
6318 *
6319 * Return: 0 if flag is disabled else 1
6320 */
6321static
6322bool dp_get_delay_stats_flag(struct cdp_pdev *pdev_handle)
6323{
6324 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6325
6326 return pdev->delay_stats_flag;
6327}
6328
6329/**
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -07006330 * dp_pdev_set_chan_noise_floor() - set channel noise floor
6331 * @pdev_handle: Datapath PDEV handle
6332 * @chan_noise_floor: Channel Noise Floor
6333 *
6334 * Return: void
6335 */
6336static
6337void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
6338 int16_t chan_noise_floor)
6339{
6340 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6341
6342 pdev->chan_noise_floor = chan_noise_floor;
6343}
6344
6345/**
nobeljc8eb4d62018-01-04 14:29:32 -08006346 * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
6347 * @vdev_handle: Datapath VDEV handle
6348 * Return: true on ucast filter flag set
6349 */
6350static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
6351{
6352 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6353 struct dp_pdev *pdev;
6354
6355 pdev = vdev->pdev;
6356
6357 if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
6358 (pdev->mo_data_filter & FILTER_DATA_UCAST))
6359 return true;
6360
6361 return false;
6362}
6363
6364/**
6365 * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
6366 * @vdev_handle: Datapath VDEV handle
6367 * Return: true on mcast filter flag set
6368 */
6369static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
6370{
6371 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6372 struct dp_pdev *pdev;
6373
6374 pdev = vdev->pdev;
6375
6376 if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
6377 (pdev->mo_data_filter & FILTER_DATA_MCAST))
6378 return true;
6379
6380 return false;
6381}
6382
6383/**
6384 * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
6385 * @vdev_handle: Datapath VDEV handle
6386 * Return: true on non data filter flag set
6387 */
6388static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
6389{
6390 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6391 struct dp_pdev *pdev;
6392
6393 pdev = vdev->pdev;
6394
6395 if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
6396 (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
6397 if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
6398 (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
6399 return true;
6400 }
6401 }
6402
6403 return false;
6404}
6405
#ifdef MESH_MODE_SUPPORT
/*
 * dp_peer_set_mesh_mode() - enable/disable mesh mode on a vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */
void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *dp_vdev = (struct dp_vdev *)vdev_hdl;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("val %d"), val);
	dp_vdev->mesh_vdev = val;
}

/*
 * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */
void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *dp_vdev = (struct dp_vdev *)vdev_hdl;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("val %d"), val);
	dp_vdev->mesh_rx_filter = val;
}
#endif
6432
/*
 * dp_aggregate_pdev_ctrl_frames_stats()- function to agreegate peer stats
 * Current scope is bar received count
 *
 * Walks every peer of every vdev on the pdev and, for each live peer,
 * issues an async REO rx-tid stats query (dp_rx_bar_stats_cb folds the
 * result into pdev->stats) and polls stats_cmd_complete for up to 10
 * timeout periods before moving to the next peer.
 *
 * @pdev_handle: DP_PDEV handle
 *
 * Return: void
 */
/* per-iteration poll interval in jiffies; HZ/1000 is integer division,
 * i.e. 0 when HZ < 1000 — NOTE(review): confirm intent on such configs
 */
#define STATS_PROC_TIMEOUT        (HZ/1000)

static void
dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	uint32_t waitcnt;

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			/* NOTE(review): TAILQ_FOREACH terminates on NULL,
			 * so this branch looks unreachable (defensive)
			 */
			if (!peer) {
				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("DP Invalid Peer refernce"));
				return;
			}

			/* skip peers being torn down */
			if (peer->delete_in_progress) {
				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("DP Peer deletion in progress"));
				continue;
			}
			/* hold a reference across the async stats query */
			qdf_atomic_inc(&peer->ref_cnt);
			waitcnt = 0;
			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
			/* poll until the callback signals completion,
			 * bounded to 10 intervals
			 */
			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
				&& waitcnt < 10) {
				schedule_timeout_interruptible(
						STATS_PROC_TIMEOUT);
				waitcnt++;
			}
			/* re-arm the completion flag for the next peer */
			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
			dp_peer_unref_delete(peer);
		}
	}
}
6477
6478/**
6479 * dp_rx_bar_stats_cb(): BAR received stats callback
6480 * @soc: SOC handle
6481 * @cb_ctxt: Call back context
6482 * @reo_status: Reo status
6483 *
6484 * return: void
6485 */
6486void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
6487 union hal_reo_status *reo_status)
6488{
6489 struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
6490 struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
6491
Chaithanya Garrepalli291dfa02018-10-12 17:11:34 +05306492 if (!qdf_atomic_read(&soc->cmn_init_done))
6493 return;
6494
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05306495 if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
6496 DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
6497 queue_status->header.status);
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306498 qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05306499 return;
6500 }
6501
6502 pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306503 qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05306504
6505}
6506
Ishank Jain1e7401c2017-02-17 15:38:39 +05306507/**
6508 * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
6509 * @vdev: DP VDEV handle
6510 *
6511 * return: void
6512 */
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05306513void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
6514 struct cdp_vdev_stats *vdev_stats)
Ishank Jain1e7401c2017-02-17 15:38:39 +05306515{
6516 struct dp_peer *peer = NULL;
Amir Patelee49ad52018-12-18 13:23:36 +05306517 struct dp_soc *soc = NULL;
6518
Viyom Mittal757853f2019-01-03 14:38:56 +05306519 if (!vdev || !vdev->pdev)
Amir Patelee49ad52018-12-18 13:23:36 +05306520 return;
6521
6522 soc = vdev->pdev->soc;
Ishank Jain1e7401c2017-02-17 15:38:39 +05306523
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05306524 qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
Ishank Jain1e7401c2017-02-17 15:38:39 +05306525
Tallapragada Kalyan4f894922018-01-03 14:26:28 +05306526 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05306527 dp_update_vdev_stats(vdev_stats, peer);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306528
Amir Patel756d05e2018-10-10 12:35:30 +05306529#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6530 dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6531 vdev_stats, vdev->vdev_id,
6532 UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6533#endif
Ishank Jain1e7401c2017-02-17 15:38:39 +05306534}
6535
/**
 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Zeroes the pdev tx/rx/tx_i stats, then rolls up every vdev's
 * aggregated stats (via a heap-allocated scratch cdp_vdev_stats) into
 * the pdev while holding the soc peer_ref_mutex and the pdev
 * vdev_list_lock.
 *
 * return: void
 */
static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc;
	/* scratch buffer reused for each vdev's aggregation pass */
	struct cdp_vdev_stats *vdev_stats =
		qdf_mem_malloc(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to get alloc vdev stats");
		return;
	}

	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));

	if (pdev->mcopy_mode)
		DP_UPDATE_STATS(pdev, pdev->invalid_peer);

	soc = pdev->soc;
	/* lock order: soc peer_ref_mutex before pdev vdev_list_lock */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		dp_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	qdf_mem_free(vdev_stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id)
;
#endif
}
6580
6581/**
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05306582 * dp_vdev_getstats() - get vdev packet level stats
6583 * @vdev_handle: Datapath VDEV handle
6584 * @stats: cdp network device stats structure
6585 *
6586 * Return: void
6587 */
6588static void dp_vdev_getstats(void *vdev_handle,
6589 struct cdp_dev_stats *stats)
6590{
6591 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Amir Patel17b91782019-01-08 12:17:15 +05306592 struct dp_pdev *pdev;
6593 struct dp_soc *soc;
6594 struct cdp_vdev_stats *vdev_stats;
6595
6596 if (!vdev)
6597 return;
6598
6599 pdev = vdev->pdev;
6600 if (!pdev)
6601 return;
6602
6603 soc = pdev->soc;
6604
6605 vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05306606
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05306607 if (!vdev_stats) {
6608 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6609 "DP alloc failure - unable to get alloc vdev stats");
6610 return;
6611 }
6612
Amir Patel17b91782019-01-08 12:17:15 +05306613 qdf_spin_lock_bh(&soc->peer_ref_mutex);
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05306614 dp_aggregate_vdev_stats(vdev, vdev_stats);
Amir Patel17b91782019-01-08 12:17:15 +05306615 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05306616
6617 stats->tx_packets = vdev_stats->tx_i.rcvd.num;
6618 stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
6619
6620 stats->tx_errors = vdev_stats->tx.tx_failed +
6621 vdev_stats->tx_i.dropped.dropped_pkt.num;
6622 stats->tx_dropped = stats->tx_errors;
6623
6624 stats->rx_packets = vdev_stats->rx.unicast.num +
6625 vdev_stats->rx.multicast.num +
6626 vdev_stats->rx.bcast.num;
6627 stats->rx_bytes = vdev_stats->rx.unicast.bytes +
6628 vdev_stats->rx.multicast.bytes +
6629 vdev_stats->rx.bcast.bytes;
6630
Amir Patel46f39b62019-04-16 12:56:26 +05306631 qdf_mem_free(vdev_stats);
6632
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05306633}
6634
6635
6636/**
Anish Natarajf12b0a32018-03-14 14:27:13 +05306637 * dp_pdev_getstats() - get pdev packet level stats
6638 * @pdev_handle: Datapath PDEV handle
6639 * @stats: cdp network device stats structure
6640 *
6641 * Return: void
6642 */
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05306643static void dp_pdev_getstats(void *pdev_handle,
Anish Natarajf12b0a32018-03-14 14:27:13 +05306644 struct cdp_dev_stats *stats)
6645{
6646 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6647
6648 dp_aggregate_pdev_stats(pdev);
6649
6650 stats->tx_packets = pdev->stats.tx_i.rcvd.num;
6651 stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
6652
6653 stats->tx_errors = pdev->stats.tx.tx_failed +
6654 pdev->stats.tx_i.dropped.dropped_pkt.num;
6655 stats->tx_dropped = stats->tx_errors;
6656
6657 stats->rx_packets = pdev->stats.rx.unicast.num +
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05306658 pdev->stats.rx.multicast.num +
6659 pdev->stats.rx.bcast.num;
Anish Natarajf12b0a32018-03-14 14:27:13 +05306660 stats->rx_bytes = pdev->stats.rx.unicast.bytes +
Pamidipati, Vijay3b0f9162018-04-16 19:06:20 +05306661 pdev->stats.rx.multicast.bytes +
6662 pdev->stats.rx.bcast.bytes;
Anish Natarajf12b0a32018-03-14 14:27:13 +05306663}
6664
6665/**
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05306666 * dp_get_device_stats() - get interface level packet stats
6667 * @handle: device handle
6668 * @stats: cdp network device stats structure
6669 * @type: device type pdev/vdev
6670 *
6671 * Return: void
6672 */
6673static void dp_get_device_stats(void *handle,
6674 struct cdp_dev_stats *stats, uint8_t type)
6675{
6676 switch (type) {
6677 case UPDATE_VDEV_STATS:
6678 dp_vdev_getstats(handle, stats);
6679 break;
6680 case UPDATE_PDEV_STATS:
6681 dp_pdev_getstats(handle, stats);
6682 break;
6683 default:
6684 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6685 "apstats cannot be updated for this input "
Aditya Sathishded018e2018-07-02 16:25:21 +05306686 "type %d", type);
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +05306687 break;
6688 }
6689
6690}
6691
6692
6693/**
Ishank Jain1e7401c2017-02-17 15:38:39 +05306694 * dp_print_pdev_tx_stats(): Print Pdev level TX stats
6695 * @pdev: DP_PDEV Handle
6696 *
6697 * Return:void
6698 */
6699static inline void
6700dp_print_pdev_tx_stats(struct dp_pdev *pdev)
6701{
Venkata Sharath Chandra Manchala69a0ed32018-12-12 14:22:11 -08006702 uint8_t i = 0, index = 0;
Venkata Sharath Chandra Manchalaec9a5302018-12-13 15:25:35 -08006703
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306704 DP_PRINT_STATS("PDEV Tx Stats:\n");
6705 DP_PRINT_STATS("Received From Stack:");
6706 DP_PRINT_STATS(" Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306707 pdev->stats.tx_i.rcvd.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306708 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306709 pdev->stats.tx_i.rcvd.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306710 DP_PRINT_STATS("Processed:");
6711 DP_PRINT_STATS(" Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306712 pdev->stats.tx_i.processed.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306713 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306714 pdev->stats.tx_i.processed.bytes);
Venkata Sharath Chandra Manchala65812e62018-02-15 16:04:52 -08006715 DP_PRINT_STATS("Total Completions:");
6716 DP_PRINT_STATS(" Packets = %u",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306717 pdev->stats.tx.comp_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306718 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306719 pdev->stats.tx.comp_pkt.bytes);
Venkata Sharath Chandra Manchala65812e62018-02-15 16:04:52 -08006720 DP_PRINT_STATS("Successful Completions:");
6721 DP_PRINT_STATS(" Packets = %u",
6722 pdev->stats.tx.tx_success.num);
6723 DP_PRINT_STATS(" Bytes = %llu",
6724 pdev->stats.tx.tx_success.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306725 DP_PRINT_STATS("Dropped:");
6726 DP_PRINT_STATS(" Total = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306727 pdev->stats.tx_i.dropped.dropped_pkt.num);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306728 DP_PRINT_STATS(" Dma_map_error = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306729 pdev->stats.tx_i.dropped.dma_error);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306730 DP_PRINT_STATS(" Ring Full = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306731 pdev->stats.tx_i.dropped.ring_full);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306732 DP_PRINT_STATS(" Descriptor Not available = %d",
Ruchi, Agrawalc3e68bc2018-07-16 16:45:34 +05306733 pdev->stats.tx_i.dropped.desc_na.num);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306734 DP_PRINT_STATS(" HW enqueue failed= %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306735 pdev->stats.tx_i.dropped.enqueue_fail);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306736 DP_PRINT_STATS(" Resources Full = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306737 pdev->stats.tx_i.dropped.res_full);
Pranita Solanke05881142018-08-17 18:20:51 +05306738 DP_PRINT_STATS(" FW removed Pkts = %u",
6739 pdev->stats.tx.dropped.fw_rem.num);
6740 DP_PRINT_STATS(" FW removed bytes= %llu",
6741 pdev->stats.tx.dropped.fw_rem.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306742 DP_PRINT_STATS(" FW removed transmitted = %d",
6743 pdev->stats.tx.dropped.fw_rem_tx);
6744 DP_PRINT_STATS(" FW removed untransmitted = %d",
6745 pdev->stats.tx.dropped.fw_rem_notx);
Venkata Sharath Chandra Manchala65812e62018-02-15 16:04:52 -08006746 DP_PRINT_STATS(" FW removed untransmitted fw_reason1 = %d",
6747 pdev->stats.tx.dropped.fw_reason1);
6748 DP_PRINT_STATS(" FW removed untransmitted fw_reason2 = %d",
6749 pdev->stats.tx.dropped.fw_reason2);
6750 DP_PRINT_STATS(" FW removed untransmitted fw_reason3 = %d",
6751 pdev->stats.tx.dropped.fw_reason3);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306752 DP_PRINT_STATS(" Aged Out from msdu/mpdu queues = %d",
6753 pdev->stats.tx.dropped.age_out);
Venkateswara Swamy Bandaru41ebb332018-09-12 18:25:29 +05306754 DP_PRINT_STATS(" headroom insufficient = %d",
6755 pdev->stats.tx_i.dropped.headroom_insufficient);
Venkata Sharath Chandra Manchalafaa0d8b2018-04-09 14:39:43 -07006756 DP_PRINT_STATS(" Multicast:");
6757 DP_PRINT_STATS(" Packets: %u",
6758 pdev->stats.tx.mcast.num);
6759 DP_PRINT_STATS(" Bytes: %llu",
6760 pdev->stats.tx.mcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306761 DP_PRINT_STATS("Scatter Gather:");
6762 DP_PRINT_STATS(" Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306763 pdev->stats.tx_i.sg.sg_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306764 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306765 pdev->stats.tx_i.sg.sg_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306766 DP_PRINT_STATS(" Dropped By Host = %d",
Amir Patel02911572018-07-02 13:00:53 +05306767 pdev->stats.tx_i.sg.dropped_host.num);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306768 DP_PRINT_STATS(" Dropped By Target = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306769 pdev->stats.tx_i.sg.dropped_target);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306770 DP_PRINT_STATS("TSO:");
6771 DP_PRINT_STATS(" Number of Segments = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306772 pdev->stats.tx_i.tso.num_seg);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306773 DP_PRINT_STATS(" Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306774 pdev->stats.tx_i.tso.tso_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306775 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306776 pdev->stats.tx_i.tso.tso_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306777 DP_PRINT_STATS(" Dropped By Host = %d",
Amir Patel02911572018-07-02 13:00:53 +05306778 pdev->stats.tx_i.tso.dropped_host.num);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306779 DP_PRINT_STATS("Mcast Enhancement:");
6780 DP_PRINT_STATS(" Packets = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306781 pdev->stats.tx_i.mcast_en.mcast_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306782 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05306783 pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306784 DP_PRINT_STATS(" Dropped: Map Errors = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306785 pdev->stats.tx_i.mcast_en.dropped_map_error);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306786 DP_PRINT_STATS(" Dropped: Self Mac = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306787 pdev->stats.tx_i.mcast_en.dropped_self_mac);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306788 DP_PRINT_STATS(" Dropped: Send Fail = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306789 pdev->stats.tx_i.mcast_en.dropped_send_fail);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306790 DP_PRINT_STATS(" Unicast sent = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306791 pdev->stats.tx_i.mcast_en.ucast);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306792 DP_PRINT_STATS("Raw:");
6793 DP_PRINT_STATS(" Packets = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306794 pdev->stats.tx_i.raw.raw_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306795 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05306796 pdev->stats.tx_i.raw.raw_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306797 DP_PRINT_STATS(" DMA map error = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306798 pdev->stats.tx_i.raw.dma_map_error);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306799 DP_PRINT_STATS("Reinjected:");
6800 DP_PRINT_STATS(" Packets = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306801 pdev->stats.tx_i.reinject_pkts.num);
chenguo6a027fb2018-05-21 18:42:54 +08006802 DP_PRINT_STATS(" Bytes = %llu\n",
6803 pdev->stats.tx_i.reinject_pkts.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306804 DP_PRINT_STATS("Inspected:");
6805 DP_PRINT_STATS(" Packets = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306806 pdev->stats.tx_i.inspect_pkts.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306807 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05306808 pdev->stats.tx_i.inspect_pkts.bytes);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05306809 DP_PRINT_STATS("Nawds Multicast:");
6810 DP_PRINT_STATS(" Packets = %d",
6811 pdev->stats.tx_i.nawds_mcast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306812 DP_PRINT_STATS(" Bytes = %llu",
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05306813 pdev->stats.tx_i.nawds_mcast.bytes);
Ruchi, Agrawal34721392017-11-13 18:02:09 +05306814 DP_PRINT_STATS("CCE Classified:");
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05306815 DP_PRINT_STATS(" CCE Classified Packets: %u",
Ruchi, Agrawal34721392017-11-13 18:02:09 +05306816 pdev->stats.tx_i.cce_classified);
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05306817 DP_PRINT_STATS(" RAW CCE Classified Packets: %u",
Ruchi, Agrawal4c1468f2017-12-08 00:04:33 +05306818 pdev->stats.tx_i.cce_classified_raw);
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05306819 DP_PRINT_STATS("Mesh stats:");
6820 DP_PRINT_STATS(" frames to firmware: %u",
6821 pdev->stats.tx_i.mesh.exception_fw);
6822 DP_PRINT_STATS(" completions from fw: %u",
6823 pdev->stats.tx_i.mesh.completion_fw);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05306824 DP_PRINT_STATS("PPDU stats counter");
6825 for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
6826 DP_PRINT_STATS(" Tag[%d] = %llu", index,
6827 pdev->stats.ppdu_stats_counter[index]);
6828 }
Venkata Sharath Chandra Manchalaec9a5302018-12-13 15:25:35 -08006829
Venkata Sharath Chandra Manchala69a0ed32018-12-12 14:22:11 -08006830 for (i = 0; i < CDP_WDI_NUM_EVENTS; i++) {
6831 if (!pdev->stats.wdi_event[i])
6832 DP_PRINT_STATS("Wdi msgs received from fw[%d]:%d",
6833 i, pdev->stats.wdi_event[i]);
6834 }
6835
Ishank Jain1e7401c2017-02-17 15:38:39 +05306836}
6837
6838/**
6839 * dp_print_pdev_rx_stats(): Print Pdev level RX stats
6840 * @pdev: DP_PDEV Handle
6841 *
6842 * Return: void
6843 */
6844static inline void
6845dp_print_pdev_rx_stats(struct dp_pdev *pdev)
6846{
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306847 DP_PRINT_STATS("PDEV Rx Stats:\n");
6848 DP_PRINT_STATS("Received From HW (Per Rx Ring):");
6849 DP_PRINT_STATS(" Packets = %d %d %d %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306850 pdev->stats.rx.rcvd_reo[0].num,
6851 pdev->stats.rx.rcvd_reo[1].num,
6852 pdev->stats.rx.rcvd_reo[2].num,
6853 pdev->stats.rx.rcvd_reo[3].num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306854 DP_PRINT_STATS(" Bytes = %llu %llu %llu %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05306855 pdev->stats.rx.rcvd_reo[0].bytes,
6856 pdev->stats.rx.rcvd_reo[1].bytes,
6857 pdev->stats.rx.rcvd_reo[2].bytes,
6858 pdev->stats.rx.rcvd_reo[3].bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306859 DP_PRINT_STATS("Replenished:");
6860 DP_PRINT_STATS(" Packets = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306861 pdev->stats.replenish.pkts.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306862 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05306863 pdev->stats.replenish.pkts.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306864 DP_PRINT_STATS(" Buffers Added To Freelist = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306865 pdev->stats.buf_freelist);
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07006866 DP_PRINT_STATS(" Low threshold intr = %d",
6867 pdev->stats.replenish.low_thresh_intrs);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306868 DP_PRINT_STATS("Dropped:");
6869 DP_PRINT_STATS(" msdu_not_done = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306870 pdev->stats.dropped.msdu_not_done);
sumedh baikadyc2fa7c92018-12-28 15:26:08 -08006871 DP_PRINT_STATS(" wifi parse = %d",
6872 pdev->stats.dropped.wifi_parse);
Neil Zhao0bd967d2018-03-02 16:00:00 -08006873 DP_PRINT_STATS(" mon_rx_drop = %d",
6874 pdev->stats.dropped.mon_rx_drop);
Tallapragada Kalyan5deeef22018-08-24 18:19:41 +05306875 DP_PRINT_STATS(" mec_drop = %d",
6876 pdev->stats.rx.mec_drop.num);
6877 DP_PRINT_STATS(" Bytes = %llu",
6878 pdev->stats.rx.mec_drop.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306879 DP_PRINT_STATS("Sent To Stack:");
6880 DP_PRINT_STATS(" Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306881 pdev->stats.rx.to_stack.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306882 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306883 pdev->stats.rx.to_stack.bytes);
sumedh baikadyc2fa7c92018-12-28 15:26:08 -08006884 DP_PRINT_STATS(" vlan_tag_stp_cnt = %d",
6885 pdev->stats.vlan_tag_stp_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306886 DP_PRINT_STATS("Multicast/Broadcast:");
6887 DP_PRINT_STATS(" Packets = %d",
Tallapragada Kalyan5deeef22018-08-24 18:19:41 +05306888 pdev->stats.rx.multicast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306889 DP_PRINT_STATS(" Bytes = %llu",
Tallapragada Kalyan5deeef22018-08-24 18:19:41 +05306890 pdev->stats.rx.multicast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306891 DP_PRINT_STATS("Errors:");
6892 DP_PRINT_STATS(" Rxdma Ring Un-inititalized = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306893 pdev->stats.replenish.rxdma_err);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306894 DP_PRINT_STATS(" Desc Alloc Failed: = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306895 pdev->stats.err.desc_alloc_fail);
chenguo6a027fb2018-05-21 18:42:54 +08006896 DP_PRINT_STATS(" IP checksum error = %d",
Tallapragada Kalyan51198fc2018-04-18 14:30:44 +05306897 pdev->stats.err.ip_csum_err);
chenguo6a027fb2018-05-21 18:42:54 +08006898 DP_PRINT_STATS(" TCP/UDP checksum error = %d",
Tallapragada Kalyan51198fc2018-04-18 14:30:44 +05306899 pdev->stats.err.tcp_udp_csum_err);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05306900
6901 /* Get bar_recv_cnt */
6902 dp_aggregate_pdev_ctrl_frames_stats(pdev);
6903 DP_PRINT_STATS("BAR Received Count: = %d",
6904 pdev->stats.rx.bar_recv_cnt);
6905
Ishank Jain1e7401c2017-02-17 15:38:39 +05306906}
6907
6908/**
Kai Chen783e0382018-01-25 16:29:08 -08006909 * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats
6910 * @pdev: DP_PDEV Handle
6911 *
6912 * Return: void
6913 */
6914static inline void
6915dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
6916{
6917 struct cdp_pdev_mon_stats *rx_mon_stats;
6918
6919 rx_mon_stats = &pdev->rx_mon_stats;
6920
6921 DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
6922
6923 dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats);
6924
6925 DP_PRINT_STATS("status_ppdu_done_cnt = %d",
6926 rx_mon_stats->status_ppdu_done);
6927 DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
6928 rx_mon_stats->dest_ppdu_done);
6929 DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
6930 rx_mon_stats->dest_mpdu_done);
Karunakar Dasinenibb7848e2018-05-07 15:09:46 -07006931 DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
6932 rx_mon_stats->dest_mpdu_drop);
Karunakar Dasineni700ad732018-11-06 12:40:07 -08006933 DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
6934 rx_mon_stats->dup_mon_linkdesc_cnt);
6935 DP_PRINT_STATS("dup_mon_buf_cnt = %d",
6936 rx_mon_stats->dup_mon_buf_cnt);
Kai Chen783e0382018-01-25 16:29:08 -08006937}
6938
6939/**
Ishank Jain1e7401c2017-02-17 15:38:39 +05306940 * dp_print_soc_tx_stats(): Print SOC level stats
6941 * @soc DP_SOC Handle
6942 *
6943 * Return: void
6944 */
6945static inline void
6946dp_print_soc_tx_stats(struct dp_soc *soc)
6947{
Soumya Bhatdbb85302018-05-18 11:01:34 +05306948 uint8_t desc_pool_id;
6949 soc->stats.tx.desc_in_use = 0;
6950
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306951 DP_PRINT_STATS("SOC Tx Stats:\n");
Soumya Bhatdbb85302018-05-18 11:01:34 +05306952
6953 for (desc_pool_id = 0;
6954 desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
6955 desc_pool_id++)
6956 soc->stats.tx.desc_in_use +=
6957 soc->tx_desc[desc_pool_id].num_allocated;
6958
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306959 DP_PRINT_STATS("Tx Descriptors In Use = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306960 soc->stats.tx.desc_in_use);
Venkata Sharath Chandra Manchalaec9a5302018-12-13 15:25:35 -08006961 DP_PRINT_STATS("Tx Invalid peer:");
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306962 DP_PRINT_STATS(" Packets = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05306963 soc->stats.tx.tx_invalid_peer.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05306964 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jaine73c4032017-03-16 11:48:15 +05306965 soc->stats.tx.tx_invalid_peer.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306966 DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05306967 soc->stats.tx.tcl_ring_full[0],
6968 soc->stats.tx.tcl_ring_full[1],
6969 soc->stats.tx.tcl_ring_full[2]);
6970
Ishank Jain1e7401c2017-02-17 15:38:39 +05306971}
Ishank Jain1e7401c2017-02-17 15:38:39 +05306972/**
6973 * dp_print_soc_rx_stats: Print SOC level Rx stats
6974 * @soc: DP_SOC Handle
6975 *
6976 * Return:void
6977 */
6978static inline void
6979dp_print_soc_rx_stats(struct dp_soc *soc)
6980{
6981 uint32_t i;
6982 char reo_error[DP_REO_ERR_LENGTH];
6983 char rxdma_error[DP_RXDMA_ERR_LENGTH];
6984 uint8_t index = 0;
6985
Tallapragada Kalyana7023622018-12-03 19:29:52 +05306986 DP_PRINT_STATS("No of AST Entries = %d", soc->num_ast_entries);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306987 DP_PRINT_STATS("SOC Rx Stats:\n");
Venkata Sharath Chandra Manchalaa7d58742018-08-31 15:14:24 -07006988 DP_PRINT_STATS("Fragmented packets: %u",
6989 soc->stats.rx.rx_frags);
6990 DP_PRINT_STATS("Reo reinjected packets: %u",
6991 soc->stats.rx.reo_reinject);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306992 DP_PRINT_STATS("Errors:\n");
6993 DP_PRINT_STATS("Rx Decrypt Errors = %d",
Pamidipati, Vijayc2cb4272017-05-23 10:09:26 +05306994 (soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
6995 soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306996 DP_PRINT_STATS("Invalid RBM = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306997 soc->stats.rx.err.invalid_rbm);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05306998 DP_PRINT_STATS("Invalid Vdev = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05306999 soc->stats.rx.err.invalid_vdev);
Chaithanya Garrepalliaf34aae2019-02-18 20:44:27 +05307000 DP_PRINT_STATS("Invalid sa_idx or da_idx = %d",
7001 soc->stats.rx.err.invalid_sa_da_idx);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307002 DP_PRINT_STATS("Invalid Pdev = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307003 soc->stats.rx.err.invalid_pdev);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307004 DP_PRINT_STATS("Invalid Peer = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05307005 soc->stats.rx.err.rx_invalid_peer.num);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307006 DP_PRINT_STATS("HAL Ring Access Fail = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307007 soc->stats.rx.err.hal_ring_access_fail);
Mohit Khanna16cd1b22019-01-25 10:46:00 -08007008 DP_PRINT_STATS("MSDU Done failures = %d",
7009 soc->stats.rx.err.msdu_done_fail);
Tallapragada Kalyan5deeef22018-08-24 18:19:41 +05307010 DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags);
Karunakar Dasineni6fb46e22018-11-14 19:28:41 -08007011 DP_PRINT_STATS("RX frag wait: %d", soc->stats.rx.rx_frag_wait);
7012 DP_PRINT_STATS("RX frag err: %d", soc->stats.rx.rx_frag_err);
Tallapragada Kalyan5deeef22018-08-24 18:19:41 +05307013 DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos);
Mohit Khanna16cd1b22019-01-25 10:46:00 -08007014 DP_PRINT_STATS("RX DESC invalid magic: %u",
7015 soc->stats.rx.err.rx_desc_invalid_magic);
Tallapragada Kalyaneff377a2019-01-09 19:13:19 +05307016 DP_PRINT_STATS("RX DUP DESC: %d",
7017 soc->stats.rx.err.hal_reo_dest_dup);
7018 DP_PRINT_STATS("RX REL DUP DESC: %d",
7019 soc->stats.rx.err.hal_wbm_rel_dup);
Pamidipati, Vijayc2cb4272017-05-23 10:09:26 +05307020
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307021 for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
Ishank Jain1e7401c2017-02-17 15:38:39 +05307022 index += qdf_snprint(&rxdma_error[index],
7023 DP_RXDMA_ERR_LENGTH - index,
Ishank Jain57c42a12017-04-12 10:42:22 +05307024 " %d", soc->stats.rx.err.rxdma_error[i]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307025 }
Mohit Khanna16cd1b22019-01-25 10:46:00 -08007026 DP_PRINT_STATS("RXDMA Error (0-31):%s", rxdma_error);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307027
7028 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307029 for (i = 0; i < HAL_REO_ERR_MAX; i++) {
Ishank Jain1e7401c2017-02-17 15:38:39 +05307030 index += qdf_snprint(&reo_error[index],
7031 DP_REO_ERR_LENGTH - index,
Ishank Jain57c42a12017-04-12 10:42:22 +05307032 " %d", soc->stats.rx.err.reo_error[i]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307033 }
Mohit Khanna16cd1b22019-01-25 10:46:00 -08007034 DP_PRINT_STATS("REO Error(0-14):%s", reo_error);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307035}
7036
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007037/**
7038 * dp_srng_get_str_from_ring_type() - Return string name for a ring
7039 * @ring_type: Ring
7040 *
7041 * Return: char const pointer
7042 */
7043static inline const
7044char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
7045{
7046 switch (ring_type) {
7047 case REO_DST:
7048 return "Reo_dst";
7049 case REO_EXCEPTION:
7050 return "Reo_exception";
7051 case REO_CMD:
7052 return "Reo_cmd";
7053 case REO_REINJECT:
7054 return "Reo_reinject";
7055 case REO_STATUS:
7056 return "Reo_status";
7057 case WBM2SW_RELEASE:
7058 return "wbm2sw_release";
7059 case TCL_DATA:
7060 return "tcl_data";
7061 case TCL_CMD:
7062 return "tcl_cmd";
7063 case TCL_STATUS:
7064 return "tcl_status";
7065 case SW2WBM_RELEASE:
7066 return "sw2wbm_release";
7067 case RXDMA_BUF:
7068 return "Rxdma_buf";
7069 case RXDMA_DST:
7070 return "Rxdma_dst";
7071 case RXDMA_MONITOR_BUF:
7072 return "Rxdma_monitor_buf";
7073 case RXDMA_MONITOR_DESC:
7074 return "Rxdma_monitor_desc";
7075 case RXDMA_MONITOR_STATUS:
7076 return "Rxdma_monitor_status";
7077 default:
7078 dp_err("Invalid ring type");
7079 break;
7080 }
7081 return "Invalid";
7082}
sumedh baikady72b1c712017-08-24 12:11:46 -07007083
7084/**
7085 * dp_print_ring_stat_from_hal(): Print hal level ring stats
7086 * @soc: DP_SOC handle
7087 * @srng: DP_SRNG handle
7088 * @ring_name: SRNG name
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007089 * @ring_type: srng src/dst ring
sumedh baikady72b1c712017-08-24 12:11:46 -07007090 *
7091 * Return: void
7092 */
Mohit Khanna81179cb2018-08-16 20:50:43 -07007093static void
sumedh baikady72b1c712017-08-24 12:11:46 -07007094dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007095 enum hal_ring_type ring_type)
sumedh baikady72b1c712017-08-24 12:11:46 -07007096{
7097 uint32_t tailp;
7098 uint32_t headp;
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007099 int32_t hw_headp = -1;
7100 int32_t hw_tailp = -1;
7101 const char *ring_name;
Venkata Sharath Chandra Manchala965035c2019-01-16 12:36:39 -08007102 struct hal_soc *hal_soc;
sumedh baikady72b1c712017-08-24 12:11:46 -07007103
Mohit Khanna81179cb2018-08-16 20:50:43 -07007104 if (soc && srng && srng->hal_srng) {
Venkata Sharath Chandra Manchala965035c2019-01-16 12:36:39 -08007105 hal_soc = (struct hal_soc *)soc->hal_soc;
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007106 ring_name = dp_srng_get_str_from_hal_ring_type(ring_type);
7107
7108 hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
7109
7110 DP_PRINT_STATS("%s:SW:Head pointer = %d Tail Pointer = %d\n",
Mohit Khanna81179cb2018-08-16 20:50:43 -07007111 ring_name, headp, tailp);
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007112
Chaithanya Garrepallidedc49b2019-02-20 23:21:20 +05307113 hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_headp,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007114 &hw_tailp, ring_type);
7115
7116 DP_PRINT_STATS("%s:HW:Head pointer = %d Tail Pointer = %d\n",
7117 ring_name, hw_headp, hw_tailp);
sumedh baikady72b1c712017-08-24 12:11:46 -07007118 }
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007119
sumedh baikady72b1c712017-08-24 12:11:46 -07007120}
7121
Pamidipati, Vijay37d107d2018-12-31 14:46:14 +05307122/*
7123 * dp_print_napi_stats(): NAPI stats
7124 * @soc - soc handle
7125 */
7126static void dp_print_napi_stats(struct dp_soc *soc)
7127{
7128 hif_print_napi_stats(soc->hif_handle);
7129}
7130
sumedh baikady72b1c712017-08-24 12:11:46 -07007131/**
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07007132 * dp_print_mon_ring_stats_from_hal() - Print stat for monitor rings based
7133 * on target
7134 * @pdev: physical device handle
7135 * @mac_id: mac id
7136 *
7137 * Return: void
7138 */
7139static inline
7140void dp_print_mon_ring_stat_from_hal(struct dp_pdev *pdev, uint8_t mac_id)
7141{
7142 if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
7143 dp_print_ring_stat_from_hal(pdev->soc,
7144 &pdev->rxdma_mon_buf_ring[mac_id],
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007145 RXDMA_MONITOR_BUF);
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07007146 dp_print_ring_stat_from_hal(pdev->soc,
7147 &pdev->rxdma_mon_dst_ring[mac_id],
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007148 RXDMA_MONITOR_DST);
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07007149 dp_print_ring_stat_from_hal(pdev->soc,
7150 &pdev->rxdma_mon_desc_ring[mac_id],
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007151 RXDMA_MONITOR_DESC);
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07007152 }
7153
7154 dp_print_ring_stat_from_hal(pdev->soc,
7155 &pdev->rxdma_mon_status_ring[mac_id],
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007156 RXDMA_MONITOR_STATUS);
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07007157}
7158
7159/**
sumedh baikady72b1c712017-08-24 12:11:46 -07007160 * dp_print_ring_stats(): Print tail and head pointer
7161 * @pdev: DP_PDEV handle
7162 *
7163 * Return:void
7164 */
7165static inline void
7166dp_print_ring_stats(struct dp_pdev *pdev)
7167{
7168 uint32_t i;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08007169 int mac_id;
sumedh baikady72b1c712017-08-24 12:11:46 -07007170
7171 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007172 &pdev->soc->reo_exception_ring,
7173 REO_EXCEPTION);
sumedh baikady72b1c712017-08-24 12:11:46 -07007174 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007175 &pdev->soc->reo_reinject_ring,
7176 REO_REINJECT);
sumedh baikady72b1c712017-08-24 12:11:46 -07007177 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007178 &pdev->soc->reo_cmd_ring,
7179 REO_CMD);
sumedh baikady72b1c712017-08-24 12:11:46 -07007180 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007181 &pdev->soc->reo_status_ring,
7182 REO_STATUS);
sumedh baikady72b1c712017-08-24 12:11:46 -07007183 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007184 &pdev->soc->rx_rel_ring,
7185 WBM2SW_RELEASE);
sumedh baikady72b1c712017-08-24 12:11:46 -07007186 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007187 &pdev->soc->tcl_cmd_ring,
7188 TCL_CMD);
sumedh baikady72b1c712017-08-24 12:11:46 -07007189 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007190 &pdev->soc->tcl_status_ring,
7191 TCL_STATUS);
sumedh baikady72b1c712017-08-24 12:11:46 -07007192 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007193 &pdev->soc->wbm_desc_rel_ring,
7194 SW2WBM_RELEASE);
7195 for (i = 0; i < MAX_REO_DEST_RINGS; i++)
sumedh baikady72b1c712017-08-24 12:11:46 -07007196 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007197 &pdev->soc->reo_dest_ring[i],
7198 REO_DST);
Mohit Khanna81179cb2018-08-16 20:50:43 -07007199
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007200 for (i = 0; i < pdev->soc->num_tcl_data_rings; i++)
sumedh baikady72b1c712017-08-24 12:11:46 -07007201 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007202 &pdev->soc->tcl_data_ring[i],
7203 TCL_DATA);
7204 for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
sumedh baikady72b1c712017-08-24 12:11:46 -07007205 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007206 &pdev->soc->tx_comp_ring[i],
7207 WBM2SW_RELEASE);
sumedh baikady72b1c712017-08-24 12:11:46 -07007208
sumedh baikady72b1c712017-08-24 12:11:46 -07007209 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007210 &pdev->rx_refill_buf_ring,
7211 RXDMA_BUF);
sumedh baikady72b1c712017-08-24 12:11:46 -07007212
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007213 dp_print_ring_stat_from_hal(pdev->soc,
7214 &pdev->rx_refill_buf_ring2,
7215 RXDMA_BUF);
7216
7217 for (i = 0; i < MAX_RX_MAC_RINGS; i++)
7218 dp_print_ring_stat_from_hal(pdev->soc,
7219 &pdev->rx_mac_buf_ring[i],
7220 RXDMA_BUF);
7221
7222 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -07007223 dp_print_mon_ring_stat_from_hal(pdev, mac_id);
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08007224
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007225 for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++)
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08007226 dp_print_ring_stat_from_hal(pdev->soc,
Venkata Sharath Chandra Manchala443b9b42018-10-10 12:04:54 -07007227 &pdev->rxdma_err_dst_ring[i],
7228 RXDMA_DST);
sumedh baikady72b1c712017-08-24 12:11:46 -07007229}
7230
Ishank Jain1e7401c2017-02-17 15:38:39 +05307231/**
7232 * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
7233 * @vdev: DP_VDEV handle
7234 *
7235 * Return:void
7236 */
7237static inline void
7238dp_txrx_host_stats_clr(struct dp_vdev *vdev)
7239{
7240 struct dp_peer *peer = NULL;
Anish Nataraj28490c42018-01-19 19:34:54 +05307241
phadiman49757302018-12-18 16:13:59 +05307242 if (!vdev || !vdev->pdev)
7243 return;
7244
Ishank Jain1e7401c2017-02-17 15:38:39 +05307245 DP_STATS_CLR(vdev->pdev);
7246 DP_STATS_CLR(vdev->pdev->soc);
7247 DP_STATS_CLR(vdev);
Pamidipati, Vijay37d107d2018-12-31 14:46:14 +05307248
7249 hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
7250
Ishank Jain1e7401c2017-02-17 15:38:39 +05307251 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7252 if (!peer)
7253 return;
7254 DP_STATS_CLR(peer);
Anish Nataraj28490c42018-01-19 19:34:54 +05307255
Amir Patel756d05e2018-10-10 12:35:30 +05307256#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7257 dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7258 &peer->stats, peer->peer_ids[0],
7259 UPDATE_PEER_STATS, vdev->pdev->pdev_id);
7260#endif
Ishank Jain1e7401c2017-02-17 15:38:39 +05307261 }
7262
Amir Patel756d05e2018-10-10 12:35:30 +05307263#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7264 dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7265 &vdev->stats, vdev->vdev_id,
7266 UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7267#endif
Ishank Jain1e7401c2017-02-17 15:38:39 +05307268}
7269
7270/**
chenguo4d877b82018-08-06 14:18:05 +08007271 * dp_print_common_rates_info(): Print common rate for tx or rx
7272 * @pkt_type_array: rate type array contains rate info
7273 *
7274 * Return:void
7275 */
7276static inline void
7277dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array)
7278{
7279 uint8_t mcs, pkt_type;
7280
7281 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
7282 for (mcs = 0; mcs < MAX_MCS; mcs++) {
7283 if (!dp_rate_string[pkt_type][mcs].valid)
7284 continue;
7285
7286 DP_PRINT_STATS(" %s = %d",
7287 dp_rate_string[pkt_type][mcs].mcs_type,
7288 pkt_type_array[pkt_type].mcs_count[mcs]);
7289 }
7290
7291 DP_PRINT_STATS("\n");
7292 }
7293}
7294
7295/**
Ishank Jain1e7401c2017-02-17 15:38:39 +05307296 * dp_print_rx_rates(): Print Rx rate stats
7297 * @vdev: DP_VDEV handle
7298 *
7299 * Return:void
7300 */
7301static inline void
7302dp_print_rx_rates(struct dp_vdev *vdev)
7303{
7304 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
chenguo4d877b82018-08-06 14:18:05 +08007305 uint8_t i;
Ishank Jain1e7401c2017-02-17 15:38:39 +05307306 uint8_t index = 0;
Ishank Jain1e7401c2017-02-17 15:38:39 +05307307 char nss[DP_NSS_LENGTH];
7308
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307309 DP_PRINT_STATS("Rx Rate Info:\n");
chenguo4d877b82018-08-06 14:18:05 +08007310 dp_print_common_rates_info(pdev->stats.rx.pkt_type);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307311
Ishank Jain57c42a12017-04-12 10:42:22 +05307312
Ishank Jain1e7401c2017-02-17 15:38:39 +05307313 index = 0;
7314 for (i = 0; i < SS_COUNT; i++) {
7315 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
Ishank Jain57c42a12017-04-12 10:42:22 +05307316 " %d", pdev->stats.rx.nss[i]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307317 }
Anish Nataraj072d8972018-01-09 18:23:33 +05307318 DP_PRINT_STATS("NSS(1-8) = %s",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307319 nss);
7320
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307321 DP_PRINT_STATS("SGI ="
Ishank Jain1e7401c2017-02-17 15:38:39 +05307322 " 0.8us %d,"
7323 " 0.4us %d,"
7324 " 1.6us %d,"
7325 " 3.2us %d,",
7326 pdev->stats.rx.sgi_count[0],
7327 pdev->stats.rx.sgi_count[1],
7328 pdev->stats.rx.sgi_count[2],
7329 pdev->stats.rx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307330 DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307331 pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
7332 pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307333 DP_PRINT_STATS("Reception Type ="
Ishank Jain1e7401c2017-02-17 15:38:39 +05307334 " SU: %d,"
7335 " MU_MIMO:%d,"
7336 " MU_OFDMA:%d,"
Ishank Jain57c42a12017-04-12 10:42:22 +05307337 " MU_OFDMA_MIMO:%d\n",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307338 pdev->stats.rx.reception_type[0],
7339 pdev->stats.rx.reception_type[1],
7340 pdev->stats.rx.reception_type[2],
7341 pdev->stats.rx.reception_type[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307342 DP_PRINT_STATS("Aggregation:\n");
7343 DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307344 pdev->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307345 DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307346 pdev->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307347 DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307348 pdev->stats.rx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307349 DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307350 pdev->stats.rx.non_amsdu_cnt);
7351}
7352
7353/**
7354 * dp_print_tx_rates(): Print tx rates
7355 * @vdev: DP_VDEV handle
7356 *
7357 * Return:void
7358 */
7359static inline void
7360dp_print_tx_rates(struct dp_vdev *vdev)
7361{
7362 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
Ishank Jain1e7401c2017-02-17 15:38:39 +05307363
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307364 DP_PRINT_STATS("Tx Rate Info:\n");
chenguo4d877b82018-08-06 14:18:05 +08007365 dp_print_common_rates_info(pdev->stats.tx.pkt_type);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307366
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307367 DP_PRINT_STATS("SGI ="
Ishank Jain57c42a12017-04-12 10:42:22 +05307368 " 0.8us %d"
7369 " 0.4us %d"
7370 " 1.6us %d"
7371 " 3.2us %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05307372 pdev->stats.tx.sgi_count[0],
7373 pdev->stats.tx.sgi_count[1],
7374 pdev->stats.tx.sgi_count[2],
7375 pdev->stats.tx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307376
7377 DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
chenguoec849832018-04-11 19:14:06 +08007378 pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
7379 pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307380
7381 DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
7382 DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
7383 DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
7384 DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
7385 DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
7386
7387 DP_PRINT_STATS("Aggregation:\n");
Chaitanya Kiran Godavarthie541e9c2019-04-02 21:43:43 +05307388 DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
7389 pdev->stats.tx.ampdu_cnt);
7390 DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
7391 pdev->stats.tx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307392 DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05307393 pdev->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307394 DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05307395 pdev->stats.tx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307396}
7397
7398/**
7399 * dp_print_peer_stats():print peer stats
7400 * @peer: DP_PEER handle
7401 *
7402 * return void
7403 */
7404static inline void dp_print_peer_stats(struct dp_peer *peer)
7405{
chenguo4d877b82018-08-06 14:18:05 +08007406 uint8_t i;
Ishank Jain1e7401c2017-02-17 15:38:39 +05307407 uint32_t index;
nobelj4e9d51f2018-08-07 19:36:47 -07007408 uint32_t j;
Ishank Jain1e7401c2017-02-17 15:38:39 +05307409 char nss[DP_NSS_LENGTH];
nobelj4e9d51f2018-08-07 19:36:47 -07007410 char mu_group_id[DP_MU_GROUP_LENGTH];
7411
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307412 DP_PRINT_STATS("Node Tx Stats:\n");
7413 DP_PRINT_STATS("Total Packet Completions = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307414 peer->stats.tx.comp_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05307415 DP_PRINT_STATS("Total Bytes Completions = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307416 peer->stats.tx.comp_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307417 DP_PRINT_STATS("Success Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307418 peer->stats.tx.tx_success.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05307419 DP_PRINT_STATS("Success Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307420 peer->stats.tx.tx_success.bytes);
Pranita Solankefc2ff392017-12-15 19:25:13 +05307421 DP_PRINT_STATS("Unicast Success Packets = %d",
7422 peer->stats.tx.ucast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05307423 DP_PRINT_STATS("Unicast Success Bytes = %llu",
Pranita Solankefc2ff392017-12-15 19:25:13 +05307424 peer->stats.tx.ucast.bytes);
7425 DP_PRINT_STATS("Multicast Success Packets = %d",
7426 peer->stats.tx.mcast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05307427 DP_PRINT_STATS("Multicast Success Bytes = %llu",
Pranita Solankefc2ff392017-12-15 19:25:13 +05307428 peer->stats.tx.mcast.bytes);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05307429 DP_PRINT_STATS("Broadcast Success Packets = %d",
7430 peer->stats.tx.bcast.num);
7431 DP_PRINT_STATS("Broadcast Success Bytes = %llu",
7432 peer->stats.tx.bcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307433 DP_PRINT_STATS("Packets Failed = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307434 peer->stats.tx.tx_failed);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307435 DP_PRINT_STATS("Packets In OFDMA = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307436 peer->stats.tx.ofdma);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307437 DP_PRINT_STATS("Packets In STBC = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307438 peer->stats.tx.stbc);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307439 DP_PRINT_STATS("Packets In LDPC = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307440 peer->stats.tx.ldpc);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307441 DP_PRINT_STATS("Packet Retries = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307442 peer->stats.tx.retries);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307443 DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307444 peer->stats.tx.amsdu_cnt);
Chaitanya Kiran Godavarthie541e9c2019-04-02 21:43:43 +05307445 DP_PRINT_STATS("Msdu's As Part of Ampdu = %d",
7446 peer->stats.tx.non_ampdu_cnt);
7447 DP_PRINT_STATS("Msdu's As Ampdu = %d",
7448 peer->stats.tx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307449 DP_PRINT_STATS("Last Packet RSSI = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307450 peer->stats.tx.last_ack_rssi);
Pranita Solanke05881142018-08-17 18:20:51 +05307451 DP_PRINT_STATS("Dropped At FW: Removed Pkts = %u",
7452 peer->stats.tx.dropped.fw_rem.num);
7453 DP_PRINT_STATS("Dropped At FW: Removed bytes = %llu",
7454 peer->stats.tx.dropped.fw_rem.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307455 DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
7456 peer->stats.tx.dropped.fw_rem_tx);
7457 DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
7458 peer->stats.tx.dropped.fw_rem_notx);
7459 DP_PRINT_STATS("Dropped : Age Out = %d",
7460 peer->stats.tx.dropped.age_out);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05307461 DP_PRINT_STATS("NAWDS : ");
7462 DP_PRINT_STATS(" Nawds multicast Drop Tx Packet = %d",
7463 peer->stats.tx.nawds_mcast_drop);
7464 DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d",
7465 peer->stats.tx.nawds_mcast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05307466 DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %llu",
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05307467 peer->stats.tx.nawds_mcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307468
7469 DP_PRINT_STATS("Rate Info:");
chenguo4d877b82018-08-06 14:18:05 +08007470 dp_print_common_rates_info(peer->stats.tx.pkt_type);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307471
Ishank Jain1e7401c2017-02-17 15:38:39 +05307472
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307473 DP_PRINT_STATS("SGI = "
Ishank Jain57c42a12017-04-12 10:42:22 +05307474 " 0.8us %d"
7475 " 0.4us %d"
7476 " 1.6us %d"
7477 " 3.2us %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307478 peer->stats.tx.sgi_count[0],
7479 peer->stats.tx.sgi_count[1],
7480 peer->stats.tx.sgi_count[2],
7481 peer->stats.tx.sgi_count[3]);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05307482 DP_PRINT_STATS("Excess Retries per AC ");
7483 DP_PRINT_STATS(" Best effort = %d",
7484 peer->stats.tx.excess_retries_per_ac[0]);
7485 DP_PRINT_STATS(" Background= %d",
7486 peer->stats.tx.excess_retries_per_ac[1]);
7487 DP_PRINT_STATS(" Video = %d",
7488 peer->stats.tx.excess_retries_per_ac[2]);
7489 DP_PRINT_STATS(" Voice = %d",
7490 peer->stats.tx.excess_retries_per_ac[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307491 DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
nobelj418b2e92018-07-25 16:19:20 -07007492 peer->stats.tx.bw[0], peer->stats.tx.bw[1],
7493 peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307494
Pranita Solankeed0aba62018-01-12 19:14:31 +05307495 index = 0;
7496 for (i = 0; i < SS_COUNT; i++) {
7497 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
7498 " %d", peer->stats.tx.nss[i]);
7499 }
nobelj4e9d51f2018-08-07 19:36:47 -07007500 DP_PRINT_STATS("NSS(1-8) = %s", nss);
7501
7502 DP_PRINT_STATS("Transmit Type :");
7503 DP_PRINT_STATS("SU %d, MU_MIMO %d, MU_OFDMA %d, MU_MIMO_OFDMA %d",
7504 peer->stats.tx.transmit_type[0],
7505 peer->stats.tx.transmit_type[1],
7506 peer->stats.tx.transmit_type[2],
7507 peer->stats.tx.transmit_type[3]);
7508
7509 for (i = 0; i < MAX_MU_GROUP_ID;) {
7510 index = 0;
7511 for (j = 0; j < DP_MU_GROUP_SHOW && i < MAX_MU_GROUP_ID;
7512 j++) {
7513 index += qdf_snprint(&mu_group_id[index],
7514 DP_MU_GROUP_LENGTH - index,
7515 " %d",
7516 peer->stats.tx.mu_group_id[i]);
7517 i++;
7518 }
7519
7520 DP_PRINT_STATS("User position list for GID %02d->%d: [%s]",
7521 i - DP_MU_GROUP_SHOW, i - 1, mu_group_id);
7522 }
7523
7524 DP_PRINT_STATS("Last Packet RU index [%d], Size [%d]",
7525 peer->stats.tx.ru_start, peer->stats.tx.ru_tones);
7526 DP_PRINT_STATS("RU Locations RU[26 52 106 242 484 996]:");
7527 DP_PRINT_STATS("RU_26: %d", peer->stats.tx.ru_loc[0]);
7528 DP_PRINT_STATS("RU 52: %d", peer->stats.tx.ru_loc[1]);
7529 DP_PRINT_STATS("RU 106: %d", peer->stats.tx.ru_loc[2]);
7530 DP_PRINT_STATS("RU 242: %d", peer->stats.tx.ru_loc[3]);
7531 DP_PRINT_STATS("RU 484: %d", peer->stats.tx.ru_loc[4]);
7532 DP_PRINT_STATS("RU 996: %d", peer->stats.tx.ru_loc[5]);
Pranita Solankeed0aba62018-01-12 19:14:31 +05307533
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307534 DP_PRINT_STATS("Aggregation:");
7535 DP_PRINT_STATS(" Number of Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05307536 peer->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307537 DP_PRINT_STATS(" Number of Msdu's With No Msdu Level Aggregation = %d\n",
Ishank Jaine73c4032017-03-16 11:48:15 +05307538 peer->stats.tx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307539
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05307540 DP_PRINT_STATS("Bytes and Packets transmitted in last one sec:");
7541 DP_PRINT_STATS(" Bytes transmitted in last sec: %d",
7542 peer->stats.tx.tx_byte_rate);
7543 DP_PRINT_STATS(" Data transmitted in last sec: %d",
7544 peer->stats.tx.tx_data_rate);
7545
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307546 DP_PRINT_STATS("Node Rx Stats:");
7547 DP_PRINT_STATS("Packets Sent To Stack = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307548 peer->stats.rx.to_stack.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05307549 DP_PRINT_STATS("Bytes Sent To Stack = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307550 peer->stats.rx.to_stack.bytes);
Ishank Jain57c42a12017-04-12 10:42:22 +05307551 for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
Pranita Solankefc2ff392017-12-15 19:25:13 +05307552 DP_PRINT_STATS("Ring Id = %d", i);
7553 DP_PRINT_STATS(" Packets Received = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05307554 peer->stats.rx.rcvd_reo[i].num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05307555 DP_PRINT_STATS(" Bytes Received = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05307556 peer->stats.rx.rcvd_reo[i].bytes);
7557 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307558 DP_PRINT_STATS("Multicast Packets Received = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307559 peer->stats.rx.multicast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05307560 DP_PRINT_STATS("Multicast Bytes Received = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307561 peer->stats.rx.multicast.bytes);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05307562 DP_PRINT_STATS("Broadcast Packets Received = %d",
7563 peer->stats.rx.bcast.num);
7564 DP_PRINT_STATS("Broadcast Bytes Received = %llu",
7565 peer->stats.rx.bcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307566 DP_PRINT_STATS("Intra BSS Packets Received = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05307567 peer->stats.rx.intra_bss.pkts.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05307568 DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05307569 peer->stats.rx.intra_bss.pkts.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307570 DP_PRINT_STATS("Raw Packets Received = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307571 peer->stats.rx.raw.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05307572 DP_PRINT_STATS("Raw Bytes Received = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307573 peer->stats.rx.raw.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307574 DP_PRINT_STATS("Errors: MIC Errors = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307575 peer->stats.rx.err.mic_err);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307576 DP_PRINT_STATS("Erros: Decryption Errors = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307577 peer->stats.rx.err.decrypt_err);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307578 DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307579 peer->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307580 DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05307581 peer->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307582 DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307583 peer->stats.rx.non_amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307584 DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307585 peer->stats.rx.amsdu_cnt);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05307586 DP_PRINT_STATS("NAWDS : ");
7587 DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d",
Ruchi, Agrawal27550482018-02-20 19:43:41 +05307588 peer->stats.rx.nawds_mcast_drop);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307589 DP_PRINT_STATS("SGI ="
Ishank Jain57c42a12017-04-12 10:42:22 +05307590 " 0.8us %d"
7591 " 0.4us %d"
7592 " 1.6us %d"
7593 " 3.2us %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307594 peer->stats.rx.sgi_count[0],
7595 peer->stats.rx.sgi_count[1],
7596 peer->stats.rx.sgi_count[2],
7597 peer->stats.rx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307598 DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307599 peer->stats.rx.bw[0], peer->stats.rx.bw[1],
7600 peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307601 DP_PRINT_STATS("Reception Type ="
Ishank Jain1e7401c2017-02-17 15:38:39 +05307602 " SU %d,"
7603 " MU_MIMO %d,"
7604 " MU_OFDMA %d,"
7605 " MU_OFDMA_MIMO %d",
7606 peer->stats.rx.reception_type[0],
7607 peer->stats.rx.reception_type[1],
7608 peer->stats.rx.reception_type[2],
7609 peer->stats.rx.reception_type[3]);
7610
chenguo4d877b82018-08-06 14:18:05 +08007611 dp_print_common_rates_info(peer->stats.rx.pkt_type);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307612
7613 index = 0;
7614 for (i = 0; i < SS_COUNT; i++) {
7615 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
Ishank Jain57c42a12017-04-12 10:42:22 +05307616 " %d", peer->stats.rx.nss[i]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307617 }
Anish Nataraj072d8972018-01-09 18:23:33 +05307618 DP_PRINT_STATS("NSS(1-8) = %s",
Ishank Jain1e7401c2017-02-17 15:38:39 +05307619 nss);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307620
7621 DP_PRINT_STATS("Aggregation:");
7622 DP_PRINT_STATS(" Msdu's Part of Ampdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05307623 peer->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307624 DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05307625 peer->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307626 DP_PRINT_STATS(" Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05307627 peer->stats.rx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05307628 DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05307629 peer->stats.rx.non_amsdu_cnt);
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05307630
7631 DP_PRINT_STATS("Bytes and Packets received in last one sec:");
7632 DP_PRINT_STATS(" Bytes received in last sec: %d",
7633 peer->stats.rx.rx_byte_rate);
7634 DP_PRINT_STATS(" Data received in last sec: %d",
7635 peer->stats.rx.rx_data_rate);
Ishank Jain1e7401c2017-02-17 15:38:39 +05307636}
7637
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07007638/*
7639 * dp_get_host_peer_stats()- function to print peer stats
7640 * @pdev_handle: DP_PDEV handle
7641 * @mac_addr: mac address of the peer
7642 *
7643 * Return: void
7644 */
7645static void
7646dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
7647{
7648 struct dp_peer *peer;
7649 uint8_t local_id;
7650
Aditya Sathishb514afc2018-12-05 15:42:17 +05307651 if (!mac_addr) {
7652 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7653 "Invalid MAC address\n");
7654 return;
7655 }
7656
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07007657 peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
7658 &local_id);
7659
7660 if (!peer) {
7661 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7662 "%s: Invalid peer\n", __func__);
7663 return;
7664 }
7665
Aditya Sathishf8074d82019-02-13 15:28:49 +05307666 /* Making sure the peer is for the specific pdev */
7667 if ((struct dp_pdev *)pdev_handle != peer->vdev->pdev) {
7668 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7669 "%s: Peer is not for this pdev\n", __func__);
7670 return;
7671 }
7672
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07007673 dp_print_peer_stats(peer);
7674 dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7675}
7676
/**
 * dp_print_soc_cfg_params() - Dump soc wlan config parameters
 * @soc: DP soc handle (NULL tolerated; validated before use)
 *
 * Prints every field of the soc-level wlan_cfg context via
 * DP_TRACE_STATS, including one line per interrupt-context ring mask.
 *
 * Return: void
 */
static void
dp_print_soc_cfg_params(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint8_t index = 0, i = 0;
	/* Scratch buffer: one formatted ring mask per print */
	char ring_mask[DP_MAX_INT_CONTEXTS_STRING_LENGTH];
	int num_of_int_contexts;

	if (!soc) {
		dp_err("Context is null");
		return;
	}

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	if (!soc_cfg_ctx) {
		dp_err("Context is null");
		return;
	}

	num_of_int_contexts =
			wlan_cfg_get_num_contexts(soc_cfg_ctx);

	/* Scalar soc configuration fields */
	DP_TRACE_STATS(DEBUG, "No. of interrupt contexts: %u",
		       soc_cfg_ctx->num_int_ctxts);
	DP_TRACE_STATS(DEBUG, "Max clients: %u",
		       soc_cfg_ctx->max_clients);
	DP_TRACE_STATS(DEBUG, "Max alloc size: %u ",
		       soc_cfg_ctx->max_alloc_size);
	DP_TRACE_STATS(DEBUG, "Per pdev tx ring: %u ",
		       soc_cfg_ctx->per_pdev_tx_ring);
	DP_TRACE_STATS(DEBUG, "Num tcl data rings: %u ",
		       soc_cfg_ctx->num_tcl_data_rings);
	DP_TRACE_STATS(DEBUG, "Per pdev rx ring: %u ",
		       soc_cfg_ctx->per_pdev_rx_ring);
	DP_TRACE_STATS(DEBUG, "Per pdev lmac ring: %u ",
		       soc_cfg_ctx->per_pdev_lmac_ring);
	DP_TRACE_STATS(DEBUG, "Num of reo dest rings: %u ",
		       soc_cfg_ctx->num_reo_dest_rings);
	DP_TRACE_STATS(DEBUG, "Num tx desc pool: %u ",
		       soc_cfg_ctx->num_tx_desc_pool);
	DP_TRACE_STATS(DEBUG, "Num tx ext desc pool: %u ",
		       soc_cfg_ctx->num_tx_ext_desc_pool);
	DP_TRACE_STATS(DEBUG, "Num tx desc: %u ",
		       soc_cfg_ctx->num_tx_desc);
	DP_TRACE_STATS(DEBUG, "Num tx ext desc: %u ",
		       soc_cfg_ctx->num_tx_ext_desc);
	DP_TRACE_STATS(DEBUG, "Htt packet type: %u ",
		       soc_cfg_ctx->htt_packet_type);
	DP_TRACE_STATS(DEBUG, "Max peer_ids: %u ",
		       soc_cfg_ctx->max_peer_id);
	DP_TRACE_STATS(DEBUG, "Tx ring size: %u ",
		       soc_cfg_ctx->tx_ring_size);
	DP_TRACE_STATS(DEBUG, "Tx comp ring size: %u ",
		       soc_cfg_ctx->tx_comp_ring_size);
	DP_TRACE_STATS(DEBUG, "Tx comp ring size nss: %u ",
		       soc_cfg_ctx->tx_comp_ring_size_nss);
	DP_TRACE_STATS(DEBUG, "Int batch threshold tx: %u ",
		       soc_cfg_ctx->int_batch_threshold_tx);
	DP_TRACE_STATS(DEBUG, "Int timer threshold tx: %u ",
		       soc_cfg_ctx->int_timer_threshold_tx);
	DP_TRACE_STATS(DEBUG, "Int batch threshold rx: %u ",
		       soc_cfg_ctx->int_batch_threshold_rx);
	DP_TRACE_STATS(DEBUG, "Int timer threshold rx: %u ",
		       soc_cfg_ctx->int_timer_threshold_rx);
	DP_TRACE_STATS(DEBUG, "Int batch threshold other: %u ",
		       soc_cfg_ctx->int_batch_threshold_other);
	DP_TRACE_STATS(DEBUG, "Int timer threshold other: %u ",
		       soc_cfg_ctx->int_timer_threshold_other);

	/*
	 * Per-interrupt-context ring masks: each loop renders one mask
	 * array into ring_mask and prints it as a single line.
	 * (index is already 0 from its initializer for the first loop.)
	 */
	for (i = 0; i < num_of_int_contexts; i++) {
		index += qdf_snprint(&ring_mask[index],
				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
				     " %d",
				     soc_cfg_ctx->int_tx_ring_mask[i]);
	}

	DP_TRACE_STATS(DEBUG, "Tx ring mask (0-%d):%s",
		       num_of_int_contexts, ring_mask);

	index = 0;
	for (i = 0; i < num_of_int_contexts; i++) {
		index += qdf_snprint(&ring_mask[index],
				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
				     " %d",
				     soc_cfg_ctx->int_rx_ring_mask[i]);
	}

	DP_TRACE_STATS(DEBUG, "Rx ring mask (0-%d):%s",
		       num_of_int_contexts, ring_mask);

	index = 0;
	for (i = 0; i < num_of_int_contexts; i++) {
		index += qdf_snprint(&ring_mask[index],
				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
				     " %d",
				     soc_cfg_ctx->int_rx_mon_ring_mask[i]);
	}

	DP_TRACE_STATS(DEBUG, "Rx mon ring mask (0-%d):%s",
		       num_of_int_contexts, ring_mask);

	index = 0;
	for (i = 0; i < num_of_int_contexts; i++) {
		index += qdf_snprint(&ring_mask[index],
				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
				     " %d",
				     soc_cfg_ctx->int_rx_err_ring_mask[i]);
	}

	DP_TRACE_STATS(DEBUG, "Rx err ring mask (0-%d):%s",
		       num_of_int_contexts, ring_mask);

	index = 0;
	for (i = 0; i < num_of_int_contexts; i++) {
		index += qdf_snprint(&ring_mask[index],
				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
				     " %d",
				     soc_cfg_ctx->int_rx_wbm_rel_ring_mask[i]);
	}

	DP_TRACE_STATS(DEBUG, "Rx wbm rel ring mask (0-%d):%s",
		       num_of_int_contexts, ring_mask);

	index = 0;
	for (i = 0; i < num_of_int_contexts; i++) {
		index += qdf_snprint(&ring_mask[index],
				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
				     " %d",
				     soc_cfg_ctx->int_reo_status_ring_mask[i]);
	}

	DP_TRACE_STATS(DEBUG, "Reo ring mask (0-%d):%s",
		       num_of_int_contexts, ring_mask);

	index = 0;
	for (i = 0; i < num_of_int_contexts; i++) {
		index += qdf_snprint(&ring_mask[index],
				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
				     " %d",
				     soc_cfg_ctx->int_rxdma2host_ring_mask[i]);
	}

	DP_TRACE_STATS(DEBUG, "Rxdma2host ring mask (0-%d):%s",
		       num_of_int_contexts, ring_mask);

	index = 0;
	for (i = 0; i < num_of_int_contexts; i++) {
		index += qdf_snprint(&ring_mask[index],
				     DP_MAX_INT_CONTEXTS_STRING_LENGTH - index,
				     " %d",
				     soc_cfg_ctx->int_host2rxdma_ring_mask[i]);
	}

	DP_TRACE_STATS(DEBUG, "Host2rxdma ring mask (0-%d):%s",
		       num_of_int_contexts, ring_mask);

	/* Feature flags and remaining ring sizes */
	DP_TRACE_STATS(DEBUG, "Rx hash: %u ",
		       soc_cfg_ctx->rx_hash);
	DP_TRACE_STATS(DEBUG, "Tso enabled: %u ",
		       soc_cfg_ctx->tso_enabled);
	DP_TRACE_STATS(DEBUG, "Lro enabled: %u ",
		       soc_cfg_ctx->lro_enabled);
	DP_TRACE_STATS(DEBUG, "Sg enabled: %u ",
		       soc_cfg_ctx->sg_enabled);
	DP_TRACE_STATS(DEBUG, "Gro enabled: %u ",
		       soc_cfg_ctx->gro_enabled);
	DP_TRACE_STATS(DEBUG, "rawmode enabled: %u ",
		       soc_cfg_ctx->rawmode_enabled);
	DP_TRACE_STATS(DEBUG, "peer flow ctrl enabled: %u ",
		       soc_cfg_ctx->peer_flow_ctrl_enabled);
	DP_TRACE_STATS(DEBUG, "napi enabled: %u ",
		       soc_cfg_ctx->napi_enabled);
	DP_TRACE_STATS(DEBUG, "Tcp Udp checksum offload: %u ",
		       soc_cfg_ctx->tcp_udp_checksumoffload);
	DP_TRACE_STATS(DEBUG, "Defrag timeout check: %u ",
		       soc_cfg_ctx->defrag_timeout_check);
	DP_TRACE_STATS(DEBUG, "Rx defrag min timeout: %u ",
		       soc_cfg_ctx->rx_defrag_min_timeout);
	DP_TRACE_STATS(DEBUG, "WBM release ring: %u ",
		       soc_cfg_ctx->wbm_release_ring);
	DP_TRACE_STATS(DEBUG, "TCL CMD ring: %u ",
		       soc_cfg_ctx->tcl_cmd_ring);
	DP_TRACE_STATS(DEBUG, "TCL Status ring: %u ",
		       soc_cfg_ctx->tcl_status_ring);
	DP_TRACE_STATS(DEBUG, "REO Reinject ring: %u ",
		       soc_cfg_ctx->reo_reinject_ring);
	DP_TRACE_STATS(DEBUG, "RX release ring: %u ",
		       soc_cfg_ctx->rx_release_ring);
	DP_TRACE_STATS(DEBUG, "REO Exception ring: %u ",
		       soc_cfg_ctx->reo_exception_ring);
	DP_TRACE_STATS(DEBUG, "REO CMD ring: %u ",
		       soc_cfg_ctx->reo_cmd_ring);
	DP_TRACE_STATS(DEBUG, "REO STATUS ring: %u ",
		       soc_cfg_ctx->reo_status_ring);
	DP_TRACE_STATS(DEBUG, "RXDMA refill ring: %u ",
		       soc_cfg_ctx->rxdma_refill_ring);
	DP_TRACE_STATS(DEBUG, "RXDMA err dst ring: %u ",
		       soc_cfg_ctx->rxdma_err_dst_ring);
}
7883
/**
 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters
 * @pdev: DP pdev handle (NULL tolerated; validated before use)
 *
 * Dumps every field of the pdev-level wlan_cfg context via
 * DP_TRACE_STATS. (Header previously mis-named this function
 * "dp_print_vdev_cfg_params".)
 *
 * Return - void
 */
static void
dp_print_pdev_cfg_params(struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;

	if (!pdev) {
		dp_err("Context is null");
		return;
	}

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	if (!pdev_cfg_ctx) {
		dp_err("Context is null");
		return;
	}

	DP_TRACE_STATS(DEBUG, "Rx dma buf ring size: %d ",
		       pdev_cfg_ctx->rx_dma_buf_ring_size);
	DP_TRACE_STATS(DEBUG, "DMA Mon buf ring size: %d ",
		       pdev_cfg_ctx->dma_mon_buf_ring_size);
	DP_TRACE_STATS(DEBUG, "DMA Mon dest ring size: %d ",
		       pdev_cfg_ctx->dma_mon_dest_ring_size);
	DP_TRACE_STATS(DEBUG, "DMA Mon status ring size: %d ",
		       pdev_cfg_ctx->dma_mon_status_ring_size);
	DP_TRACE_STATS(DEBUG, "Rxdma monitor desc ring: %d",
		       pdev_cfg_ctx->rxdma_monitor_desc_ring);
	DP_TRACE_STATS(DEBUG, "Num mac rings: %d ",
		       pdev_cfg_ctx->num_mac_rings);
}
7920
/**
 * dp_txrx_stats_help() - Helper function for Txrx_Stats
 *
 * Prints the iwpriv txrx_stats command syntax followed by one line
 * per supported stats_option value.
 *
 * Return: None
 */
static void dp_txrx_stats_help(void)
{
	/* Help text, emitted one dp_info() line per entry */
	static const char * const txrx_stats_usage[] = {
		"Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>",
		"stats_option:",
		" 1 -- HTT Tx Statistics",
		" 2 -- HTT Rx Statistics",
		" 3 -- HTT Tx HW Queue Statistics",
		" 4 -- HTT Tx HW Sched Statistics",
		" 5 -- HTT Error Statistics",
		" 6 -- HTT TQM Statistics",
		" 7 -- HTT TQM CMDQ Statistics",
		" 8 -- HTT TX_DE_CMN Statistics",
		" 9 -- HTT Tx Rate Statistics",
		" 10 -- HTT Rx Rate Statistics",
		" 11 -- HTT Peer Statistics",
		" 12 -- HTT Tx SelfGen Statistics",
		" 13 -- HTT Tx MU HWQ Statistics",
		" 14 -- HTT RING_IF_INFO Statistics",
		" 15 -- HTT SRNG Statistics",
		" 16 -- HTT SFM Info Statistics",
		" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics",
		" 18 -- HTT Peer List Details",
		" 20 -- Clear Host Statistics",
		" 21 -- Host Rx Rate Statistics",
		" 22 -- Host Tx Rate Statistics",
		" 23 -- Host Tx Statistics",
		" 24 -- Host Rx Statistics",
		" 25 -- Host AST Statistics",
		" 26 -- Host SRNG PTR Statistics",
		" 27 -- Host Mon Statistics",
		" 28 -- Host REO Queue Statistics",
		" 29 -- Host Soc cfg param Statistics",
		" 30 -- Host pdev cfg param Statistics",
	};
	unsigned int line;

	for (line = 0;
	     line < sizeof(txrx_stats_usage) / sizeof(txrx_stats_usage[0]);
	     line++)
		dp_info("%s", txrx_stats_usage[line]);
}
7960
7961/**
Ishank Jain1e7401c2017-02-17 15:38:39 +05307962 * dp_print_host_stats()- Function to print the stats aggregated at host
7963 * @vdev_handle: DP_VDEV handle
Ishank Jain1e7401c2017-02-17 15:38:39 +05307964 * @type: host stats type
7965 *
Ishank Jain1e7401c2017-02-17 15:38:39 +05307966 * Return: 0 on success, print error message in case of failure
7967 */
7968static int
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07007969dp_print_host_stats(struct cdp_vdev *vdev_handle,
7970 struct cdp_txrx_stats_req *req)
Ishank Jain1e7401c2017-02-17 15:38:39 +05307971{
7972 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7973 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07007974 enum cdp_host_txrx_stats type =
7975 dp_stats_mapping_table[req->stats][STATS_HOST];
Ishank Jain1e7401c2017-02-17 15:38:39 +05307976
7977 dp_aggregate_pdev_stats(pdev);
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05307978
Ishank Jain1e7401c2017-02-17 15:38:39 +05307979 switch (type) {
Ishank Jain6290a3c2017-03-21 10:49:39 +05307980 case TXRX_CLEAR_STATS:
7981 dp_txrx_host_stats_clr(vdev);
7982 break;
Ishank Jain1e7401c2017-02-17 15:38:39 +05307983 case TXRX_RX_RATE_STATS:
7984 dp_print_rx_rates(vdev);
7985 break;
7986 case TXRX_TX_RATE_STATS:
7987 dp_print_tx_rates(vdev);
7988 break;
7989 case TXRX_TX_HOST_STATS:
7990 dp_print_pdev_tx_stats(pdev);
7991 dp_print_soc_tx_stats(pdev->soc);
7992 break;
7993 case TXRX_RX_HOST_STATS:
7994 dp_print_pdev_rx_stats(pdev);
7995 dp_print_soc_rx_stats(pdev->soc);
7996 break;
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05307997 case TXRX_AST_STATS:
7998 dp_print_ast_stats(pdev->soc);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +05307999 dp_print_peer_table(vdev);
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05308000 break;
sumedh baikady72b1c712017-08-24 12:11:46 -07008001 case TXRX_SRNG_PTR_STATS:
Kai Chen783e0382018-01-25 16:29:08 -08008002 dp_print_ring_stats(pdev);
8003 break;
8004 case TXRX_RX_MON_STATS:
8005 dp_print_pdev_rx_mon_stats(pdev);
8006 break;
Venkata Sharath Chandra Manchala0cb31982018-03-30 15:55:26 -07008007 case TXRX_REO_QUEUE_STATS:
8008 dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
8009 break;
Venkata Sharath Chandra Manchalaf167af12018-10-09 20:23:02 -07008010 case TXRX_SOC_CFG_PARAMS:
8011 dp_print_soc_cfg_params(pdev->soc);
8012 break;
8013 case TXRX_PDEV_CFG_PARAMS:
8014 dp_print_pdev_cfg_params(pdev);
8015 break;
Pamidipati, Vijay37d107d2018-12-31 14:46:14 +05308016 case TXRX_NAPI_STATS:
8017 dp_print_napi_stats(pdev->soc);
8018 break;
Ishank Jain1e7401c2017-02-17 15:38:39 +05308019 default:
Venkata Sharath Chandra Manchala389c4e12018-10-03 12:13:33 -07008020 dp_info("Wrong Input For TxRx Host Stats");
8021 dp_txrx_stats_help();
Ishank Jain1e7401c2017-02-17 15:38:39 +05308022 break;
8023 }
8024 return 0;
8025}
8026
8027/*
Soumya Bhat7422db82017-12-15 13:48:53 +05308028 * dp_ppdu_ring_reset()- Reset PPDU Stats ring
8029 * @pdev: DP_PDEV handle
8030 *
8031 * Return: void
8032 */
8033static void
8034dp_ppdu_ring_reset(struct dp_pdev *pdev)
8035{
8036 struct htt_rx_ring_tlv_filter htt_tlv_filter;
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008037 int mac_id;
Soumya Bhat7422db82017-12-15 13:48:53 +05308038
hangtianfe681a52019-01-16 17:16:28 +08008039 qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
Soumya Bhat7422db82017-12-15 13:48:53 +05308040
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008041 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
8042 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
8043 pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05308044
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008045 htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
8046 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
8047 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
8048 }
Soumya Bhat7422db82017-12-15 13:48:53 +05308049}
8050
8051/*
Anish Nataraj38a29562017-08-18 19:41:17 +05308052 * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
8053 * @pdev: DP_PDEV handle
8054 *
8055 * Return: void
8056 */
8057static void
8058dp_ppdu_ring_cfg(struct dp_pdev *pdev)
8059{
8060 struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008061 int mac_id;
Anish Nataraj38a29562017-08-18 19:41:17 +05308062
Soumya Bhat35fc6992018-03-09 18:39:03 +05308063 htt_tlv_filter.mpdu_start = 1;
Anish Nataraj38a29562017-08-18 19:41:17 +05308064 htt_tlv_filter.msdu_start = 0;
8065 htt_tlv_filter.packet = 0;
8066 htt_tlv_filter.msdu_end = 0;
8067 htt_tlv_filter.mpdu_end = 0;
nobelj1c31fee2018-03-21 11:47:05 -07008068 htt_tlv_filter.attention = 0;
Anish Nataraj38a29562017-08-18 19:41:17 +05308069 htt_tlv_filter.ppdu_start = 1;
8070 htt_tlv_filter.ppdu_end = 1;
8071 htt_tlv_filter.ppdu_end_user_stats = 1;
8072 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
8073 htt_tlv_filter.ppdu_end_status_done = 1;
8074 htt_tlv_filter.enable_fp = 1;
8075 htt_tlv_filter.enable_md = 0;
sumedh baikady59a2d332018-05-22 01:50:38 -07008076 if (pdev->neighbour_peers_added &&
8077 pdev->soc->hw_nac_monitor_support) {
8078 htt_tlv_filter.enable_md = 1;
8079 htt_tlv_filter.packet_header = 1;
8080 }
nobelj1c31fee2018-03-21 11:47:05 -07008081 if (pdev->mcopy_mode) {
8082 htt_tlv_filter.packet_header = 1;
Soumya Bhat2f54de22018-02-21 09:54:28 +05308083 htt_tlv_filter.enable_mo = 1;
nobelj1c31fee2018-03-21 11:47:05 -07008084 }
nobeljd124b742017-10-16 11:59:12 -07008085 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
8086 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
8087 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
8088 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
8089 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
8090 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
sumedh baikady59a2d332018-05-22 01:50:38 -07008091 if (pdev->neighbour_peers_added &&
8092 pdev->soc->hw_nac_monitor_support)
8093 htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
Anish Nataraj38a29562017-08-18 19:41:17 +05308094
Kiran Venkatappa07921612019-03-02 23:14:12 +05308095 htt_tlv_filter.offset_valid = false;
8096
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -08008097 for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
8098 int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
8099 pdev->pdev_id);
8100
8101 htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
8102 pdev->rxdma_mon_status_ring[mac_id].hal_srng,
8103 RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
8104 }
Anish Nataraj38a29562017-08-18 19:41:17 +05308105}
8106
8107/*
Alok Singh40a622b2018-06-28 10:47:26 +05308108 * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
8109 * modes are enabled or not.
8110 * @dp_pdev: dp pdev handle.
8111 *
8112 * Return: bool
8113 */
8114static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
8115{
8116 if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
8117 !pdev->mcopy_mode)
8118 return true;
8119 else
8120 return false;
8121}
8122
8123/*
Vinay Adella873dc402018-05-28 12:06:34 +05308124 *dp_set_bpr_enable() - API to enable/disable bpr feature
8125 *@pdev_handle: DP_PDEV handle.
8126 *@val: Provided value.
8127 *
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308128 *Return: 0 for success. nonzero for failure.
Vinay Adella873dc402018-05-28 12:06:34 +05308129 */
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308130static QDF_STATUS
Vinay Adella873dc402018-05-28 12:06:34 +05308131dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
8132{
8133 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8134
8135 switch (val) {
8136 case CDP_BPR_DISABLE:
8137 pdev->bpr_enable = CDP_BPR_DISABLE;
8138 if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
8139 !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
8140 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
8141 } else if (pdev->enhanced_stats_en &&
8142 !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
8143 !pdev->pktlog_ppdu_stats) {
8144 dp_h2t_cfg_stats_msg_send(pdev,
8145 DP_PPDU_STATS_CFG_ENH_STATS,
8146 pdev->pdev_id);
8147 }
8148 break;
8149 case CDP_BPR_ENABLE:
8150 pdev->bpr_enable = CDP_BPR_ENABLE;
8151 if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
8152 !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
8153 dp_h2t_cfg_stats_msg_send(pdev,
8154 DP_PPDU_STATS_CFG_BPR,
8155 pdev->pdev_id);
8156 } else if (pdev->enhanced_stats_en &&
8157 !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
8158 !pdev->pktlog_ppdu_stats) {
8159 dp_h2t_cfg_stats_msg_send(pdev,
8160 DP_PPDU_STATS_CFG_BPR_ENH,
8161 pdev->pdev_id);
8162 } else if (pdev->pktlog_ppdu_stats) {
8163 dp_h2t_cfg_stats_msg_send(pdev,
8164 DP_PPDU_STATS_CFG_BPR_PKTLOG,
8165 pdev->pdev_id);
8166 }
8167 break;
8168 default:
8169 break;
8170 }
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308171
8172 return QDF_STATUS_SUCCESS;
Vinay Adella873dc402018-05-28 12:06:34 +05308173}
8174
8175/*
Varsha Mishra18281792019-03-06 17:57:23 +05308176 * dp_pdev_tid_stats_ingress_inc
8177 * @pdev: pdev handle
8178 * @val: increase in value
8179 *
8180 * Return: void
8181 */
8182static void
8183dp_pdev_tid_stats_ingress_inc(struct cdp_pdev *pdev, uint32_t val)
8184{
8185 struct dp_pdev *dp_pdev = (struct dp_pdev *)pdev;
8186
8187 dp_pdev->stats.tid_stats.ingress_stack += val;
8188}
8189
8190/*
8191 * dp_pdev_tid_stats_osif_drop
8192 * @pdev: pdev handle
8193 * @val: increase in value
8194 *
8195 * Return: void
8196 */
8197static void
8198dp_pdev_tid_stats_osif_drop(struct cdp_pdev *pdev, uint32_t val)
8199{
8200 struct dp_pdev *dp_pdev = (struct dp_pdev *)pdev;
8201
8202 dp_pdev->stats.tid_stats.osif_drop += val;
8203}
8204
8205/*
Soumya Bhat6fee59c2017-10-31 13:12:37 +05308206 * dp_config_debug_sniffer()- API to enable/disable debug sniffer
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308207 * @pdev_handle: DP_PDEV handle
8208 * @val: user provided value
8209 *
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308210 * Return: 0 for success. nonzero for failure.
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308211 */
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308212static QDF_STATUS
Soumya Bhat6fee59c2017-10-31 13:12:37 +05308213dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308214{
8215 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308216 QDF_STATUS status = QDF_STATUS_SUCCESS;
8217
8218 if (pdev->mcopy_mode)
8219 dp_reset_monitor_mode(pdev_handle);
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308220
Soumya Bhat89647ef2017-11-16 17:23:48 +05308221 switch (val) {
8222 case 0:
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308223 pdev->tx_sniffer_enable = 0;
Soumya Bhat7422db82017-12-15 13:48:53 +05308224 pdev->mcopy_mode = 0;
Chaithanya Garrepalli65e6fc12018-12-21 19:17:33 +05308225 pdev->monitor_configured = false;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308226
Alok Singh40a622b2018-06-28 10:47:26 +05308227 if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
8228 !pdev->bpr_enable) {
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008229 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05308230 dp_ppdu_ring_reset(pdev);
Alok Singh40a622b2018-06-28 10:47:26 +05308231 } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308232 dp_h2t_cfg_stats_msg_send(pdev,
8233 DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
Alok Singh40a622b2018-06-28 10:47:26 +05308234 } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
8235 dp_h2t_cfg_stats_msg_send(pdev,
8236 DP_PPDU_STATS_CFG_BPR_ENH,
8237 pdev->pdev_id);
8238 } else {
8239 dp_h2t_cfg_stats_msg_send(pdev,
8240 DP_PPDU_STATS_CFG_BPR,
8241 pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05308242 }
Soumya Bhat89647ef2017-11-16 17:23:48 +05308243 break;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308244
Soumya Bhat89647ef2017-11-16 17:23:48 +05308245 case 1:
8246 pdev->tx_sniffer_enable = 1;
Soumya Bhat7422db82017-12-15 13:48:53 +05308247 pdev->mcopy_mode = 0;
Chaithanya Garrepalli65e6fc12018-12-21 19:17:33 +05308248 pdev->monitor_configured = false;
Soumya Bhat7422db82017-12-15 13:48:53 +05308249
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308250 if (!pdev->pktlog_ppdu_stats)
Soumya Bhat7422db82017-12-15 13:48:53 +05308251 dp_h2t_cfg_stats_msg_send(pdev,
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308252 DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
Soumya Bhat89647ef2017-11-16 17:23:48 +05308253 break;
8254 case 2:
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308255 if (pdev->monitor_vdev) {
8256 status = QDF_STATUS_E_RESOURCES;
8257 break;
8258 }
8259
Soumya Bhat7422db82017-12-15 13:48:53 +05308260 pdev->mcopy_mode = 1;
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308261 dp_pdev_configure_monitor_rings(pdev);
Chaithanya Garrepalli65e6fc12018-12-21 19:17:33 +05308262 pdev->monitor_configured = true;
Soumya Bhat89647ef2017-11-16 17:23:48 +05308263 pdev->tx_sniffer_enable = 0;
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308264
8265 if (!pdev->pktlog_ppdu_stats)
Soumya Bhat7422db82017-12-15 13:48:53 +05308266 dp_h2t_cfg_stats_msg_send(pdev,
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308267 DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
Soumya Bhat89647ef2017-11-16 17:23:48 +05308268 break;
Kai Chen52ef33f2019-03-05 18:33:40 -08008269
Soumya Bhat89647ef2017-11-16 17:23:48 +05308270 default:
8271 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05308272 "Invalid value");
Soumya Bhat89647ef2017-11-16 17:23:48 +05308273 break;
8274 }
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308275 return status;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308276}
8277
8278/*
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05308279 * dp_enable_enhanced_stats()- API to enable enhanced statistcs
8280 * @pdev_handle: DP_PDEV handle
8281 *
8282 * Return: void
8283 */
8284static void
8285dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
8286{
8287 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05308288
8289 if (pdev->enhanced_stats_en == 0)
8290 dp_cal_client_timer_start(pdev->cal_client_ctx);
8291
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05308292 pdev->enhanced_stats_en = 1;
Anish Nataraj38a29562017-08-18 19:41:17 +05308293
Chaithanya Garrepalli1bbf4f02018-07-20 12:07:38 +05308294 if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
8295 !pdev->monitor_vdev)
Soumya Bhat7422db82017-12-15 13:48:53 +05308296 dp_ppdu_ring_cfg(pdev);
8297
Alok Singh40a622b2018-06-28 10:47:26 +05308298 if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
Soumya Bhat0d6245c2018-02-08 21:02:57 +05308299 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
Alok Singh40a622b2018-06-28 10:47:26 +05308300 } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
8301 dp_h2t_cfg_stats_msg_send(pdev,
8302 DP_PPDU_STATS_CFG_BPR_ENH,
8303 pdev->pdev_id);
8304 }
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05308305}
8306
8307/*
8308 * dp_disable_enhanced_stats()- API to disable enhanced statistcs
8309 * @pdev_handle: DP_PDEV handle
8310 *
8311 * Return: void
8312 */
8313static void
8314dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
8315{
8316 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308317
Ruchi, Agrawal234753c2018-06-28 14:53:37 +05308318 if (pdev->enhanced_stats_en == 1)
8319 dp_cal_client_timer_stop(pdev->cal_client_ctx);
8320
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05308321 pdev->enhanced_stats_en = 0;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308322
Alok Singh40a622b2018-06-28 10:47:26 +05308323 if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07008324 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
Alok Singh40a622b2018-06-28 10:47:26 +05308325 } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
8326 dp_h2t_cfg_stats_msg_send(pdev,
8327 DP_PPDU_STATS_CFG_BPR,
8328 pdev->pdev_id);
8329 }
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +05308330
Chaithanya Garrepalli1bbf4f02018-07-20 12:07:38 +05308331 if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
8332 !pdev->monitor_vdev)
Soumya Bhat7422db82017-12-15 13:48:53 +05308333 dp_ppdu_ring_reset(pdev);
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05308334}
8335
8336/*
Ishank Jain6290a3c2017-03-21 10:49:39 +05308337 * dp_get_fw_peer_stats()- function to print peer stats
8338 * @pdev_handle: DP_PDEV handle
8339 * @mac_addr: mac address of the peer
8340 * @cap: Type of htt stats requested
Amir Patel1ea85d42019-01-09 15:19:10 +05308341 * @is_wait: if set, wait on completion from firmware response
Ishank Jain6290a3c2017-03-21 10:49:39 +05308342 *
8343 * Currently Supporting only MAC ID based requests Only
8344 * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
8345 * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
8346 * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
8347 *
8348 * Return: void
8349 */
8350static void
8351dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
Amir Patel1ea85d42019-01-09 15:19:10 +05308352 uint32_t cap, uint32_t is_wait)
Ishank Jain6290a3c2017-03-21 10:49:39 +05308353{
8354 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Pamidipati, Vijayc2cf6692017-11-22 10:17:34 +05308355 int i;
Ishank Jain6290a3c2017-03-21 10:49:39 +05308356 uint32_t config_param0 = 0;
8357 uint32_t config_param1 = 0;
8358 uint32_t config_param2 = 0;
8359 uint32_t config_param3 = 0;
8360
8361 HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
8362 config_param0 |= (1 << (cap + 1));
8363
Pamidipati, Vijayc2cf6692017-11-22 10:17:34 +05308364 for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
8365 config_param1 |= (1 << i);
8366 }
Ishank Jain6290a3c2017-03-21 10:49:39 +05308367
8368 config_param2 |= (mac_addr[0] & 0x000000ff);
8369 config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
8370 config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
8371 config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
8372
8373 config_param3 |= (mac_addr[4] & 0x000000ff);
8374 config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
8375
Amir Patel1ea85d42019-01-09 15:19:10 +05308376 if (is_wait) {
8377 qdf_event_reset(&pdev->fw_peer_stats_event);
8378 dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
8379 config_param0, config_param1,
8380 config_param2, config_param3,
8381 0, 1, 0);
8382 qdf_wait_single_event(&pdev->fw_peer_stats_event,
8383 DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
8384 } else {
8385 dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
8386 config_param0, config_param1,
8387 config_param2, config_param3,
8388 0, 0, 0);
8389 }
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07008390
Ishank Jain6290a3c2017-03-21 10:49:39 +05308391}
8392
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05308393/* This struct definition will be removed from here
8394 * once it get added in FW headers*/
8395struct httstats_cmd_req {
8396 uint32_t config_param0;
8397 uint32_t config_param1;
8398 uint32_t config_param2;
8399 uint32_t config_param3;
8400 int cookie;
8401 u_int8_t stats_id;
8402};
8403
8404/*
8405 * dp_get_htt_stats: function to process the httstas request
8406 * @pdev_handle: DP pdev handle
8407 * @data: pointer to request data
8408 * @data_len: length for request data
8409 *
8410 * return: void
8411 */
8412static void
8413dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
8414{
8415 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8416 struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
8417
8418 QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
8419 dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
8420 req->config_param0, req->config_param1,
8421 req->config_param2, req->config_param3,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08008422 req->cookie, 0, 0);
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05308423}
Vinay Adella873dc402018-05-28 12:06:34 +05308424
Ishank Jain9f174c62017-03-30 18:37:42 +05308425/*
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308426 * dp_set_pdev_param: function to set parameters in pdev
8427 * @pdev_handle: DP pdev handle
8428 * @param: parameter type to be set
8429 * @val: value of parameter to be set
8430 *
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308431 * Return: 0 for success. nonzero for failure.
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308432 */
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308433static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
8434 enum cdp_pdev_param_type param,
8435 uint8_t val)
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308436{
Pamidipati, Vijay13f5ec22018-08-06 17:34:21 +05308437 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308438 switch (param) {
Soumya Bhat6fee59c2017-10-31 13:12:37 +05308439 case CDP_CONFIG_DEBUG_SNIFFER:
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308440 return dp_config_debug_sniffer(pdev_handle, val);
Vinay Adella873dc402018-05-28 12:06:34 +05308441 case CDP_CONFIG_BPR_ENABLE:
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308442 return dp_set_bpr_enable(pdev_handle, val);
Pamidipati, Vijay13f5ec22018-08-06 17:34:21 +05308443 case CDP_CONFIG_PRIMARY_RADIO:
8444 pdev->is_primary = val;
8445 break;
Ankit Kumar8dc0e2a2019-02-28 18:17:15 +05308446 case CDP_CONFIG_CAPTURE_LATENCY:
8447 if (val == 1)
8448 pdev->latency_capture_enable = true;
8449 else
8450 pdev->latency_capture_enable = false;
8451 break;
Varsha Mishra18281792019-03-06 17:57:23 +05308452 case CDP_INGRESS_STATS:
8453 dp_pdev_tid_stats_ingress_inc(pdev_handle, val);
8454 break;
8455 case CDP_OSIF_DROP:
8456 dp_pdev_tid_stats_osif_drop(pdev_handle, val);
8457 break;
Kai Chen52ef33f2019-03-05 18:33:40 -08008458 case CDP_CONFIG_ENH_RX_CAPTURE:
8459 return dp_config_enh_rx_capture(pdev_handle, val);
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308460 default:
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308461 return QDF_STATUS_E_INVAL;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308462 }
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +05308463 return QDF_STATUS_SUCCESS;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05308464}
8465
8466/*
Varsha Mishraa331e6e2019-03-11 12:16:14 +05308467 * dp_calculate_delay_stats: function to get rx delay stats
8468 * @vdev_handle: DP vdev handle
8469 * @nbuf: skb
8470 *
8471 * Return: void
8472 */
8473static void dp_calculate_delay_stats(struct cdp_vdev *vdev_handle,
8474 qdf_nbuf_t nbuf)
8475{
8476 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8477
8478 dp_rx_compute_delay(vdev, nbuf);
8479}
8480
8481/*
phadiman4213e9c2018-10-29 12:50:02 +05308482 * dp_get_vdev_param: function to get parameters from vdev
8483 * @param: parameter type to get value
8484 *
8485 * return: void
8486 */
8487static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
8488 enum cdp_vdev_param_type param)
8489{
8490 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8491 uint32_t val;
8492
8493 switch (param) {
8494 case CDP_ENABLE_WDS:
8495 val = vdev->wds_enabled;
8496 break;
8497 case CDP_ENABLE_MEC:
8498 val = vdev->mec_enabled;
8499 break;
8500 case CDP_ENABLE_DA_WAR:
Nandha Kishore Easwaranf9c44ce2019-01-18 15:31:18 +05308501 val = vdev->pdev->soc->da_war_enabled;
phadiman4213e9c2018-10-29 12:50:02 +05308502 break;
8503 default:
8504 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8505 "param value %d is wrong\n",
8506 param);
8507 val = -1;
8508 break;
8509 }
8510
8511 return val;
8512}
8513
8514/*
Ishank Jain9f174c62017-03-30 18:37:42 +05308515 * dp_set_vdev_param: function to set parameters in vdev
8516 * @param: parameter type to be set
8517 * @val: value of parameter to be set
8518 *
8519 * return: void
8520 */
8521static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
8522 enum cdp_vdev_param_type param, uint32_t val)
8523{
8524 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Ishank Jain9f174c62017-03-30 18:37:42 +05308525 switch (param) {
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05308526 case CDP_ENABLE_WDS:
Pamidipati, Vijay3756b762018-05-12 11:10:37 +05308527 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8528 "wds_enable %d for vdev(%p) id(%d)\n",
8529 val, vdev, vdev->vdev_id);
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05308530 vdev->wds_enabled = val;
8531 break;
phadiman4213e9c2018-10-29 12:50:02 +05308532 case CDP_ENABLE_MEC:
8533 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8534 "mec_enable %d for vdev(%p) id(%d)\n",
8535 val, vdev, vdev->vdev_id);
8536 vdev->mec_enabled = val;
8537 break;
8538 case CDP_ENABLE_DA_WAR:
8539 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8540 "da_war_enable %d for vdev(%p) id(%d)\n",
8541 val, vdev, vdev->vdev_id);
Nandha Kishore Easwaranf9c44ce2019-01-18 15:31:18 +05308542 vdev->pdev->soc->da_war_enabled = val;
8543 dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
8544 vdev->pdev->soc));
phadiman4213e9c2018-10-29 12:50:02 +05308545 break;
Ishank Jain9f174c62017-03-30 18:37:42 +05308546 case CDP_ENABLE_NAWDS:
8547 vdev->nawds_enabled = val;
Ishank Jainb463d9a2017-05-08 14:59:47 +05308548 break;
Ishank Jainc838b132017-02-17 11:08:18 +05308549 case CDP_ENABLE_MCAST_EN:
8550 vdev->mcast_enhancement_en = val;
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05308551 break;
8552 case CDP_ENABLE_PROXYSTA:
8553 vdev->proxysta_vdev = val;
8554 break;
Kabilan Kannan56bfd8f2017-04-26 13:26:47 -07008555 case CDP_UPDATE_TDLS_FLAGS:
8556 vdev->tdls_link_connected = val;
Ishank Jainb463d9a2017-05-08 14:59:47 +05308557 break;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05308558 case CDP_CFG_WDS_AGING_TIMER:
8559 if (val == 0)
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05308560 qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05308561 else if (val != vdev->wds_aging_timer_val)
Chaitanya Kiran Godavarthif6c06122018-11-23 23:24:05 +05308562 qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05308563
8564 vdev->wds_aging_timer_val = val;
8565 break;
URAJ SASAN81d95712017-08-21 20:51:03 +05308566 case CDP_ENABLE_AP_BRIDGE:
8567 if (wlan_op_mode_sta != vdev->opmode)
8568 vdev->ap_bridge_enabled = val;
8569 else
8570 vdev->ap_bridge_enabled = false;
8571 break;
ruchi agrawal45f3ac42017-10-25 09:03:28 +05308572 case CDP_ENABLE_CIPHER:
8573 vdev->sec_type = val;
8574 break;
Nandha Kishore Easwaran47e74162017-12-12 11:54:01 +05308575 case CDP_ENABLE_QWRAP_ISOLATION:
8576 vdev->isolation_vdev = val;
8577 break;
Ishank Jain9f174c62017-03-30 18:37:42 +05308578 default:
8579 break;
8580 }
Pamidipati, Vijayc9a13a52017-04-06 17:45:49 +05308581
8582 dp_tx_vdev_update_search_flags(vdev);
Ishank Jain9f174c62017-03-30 18:37:42 +05308583}
8584
8585/**
8586 * dp_peer_set_nawds: set nawds bit in peer
8587 * @peer_handle: pointer to peer
8588 * @value: enable/disable nawds
8589 *
8590 * return: void
8591 */
c_cgodavbd5b3c22017-06-07 12:31:40 +05308592static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
Ishank Jain9f174c62017-03-30 18:37:42 +05308593{
8594 struct dp_peer *peer = (struct dp_peer *)peer_handle;
8595 peer->nawds_enabled = value;
8596}
Ishank Jain1e7401c2017-02-17 15:38:39 +05308597
Ishank Jain949674c2017-02-27 17:09:29 +05308598/*
8599 * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
8600 * @vdev_handle: DP_VDEV handle
8601 * @map_id:ID of map that needs to be updated
8602 *
8603 * Return: void
8604 */
8605static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
8606 uint8_t map_id)
8607{
8608 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8609 vdev->dscp_tid_map_id = map_id;
8610 return;
8611}
8612
Surya Prakash Raajen3a01bdd2019-02-19 13:19:36 +05308613#ifdef DP_RATETABLE_SUPPORT
8614static int dp_txrx_get_ratekbps(int preamb, int mcs,
8615 int htflag, int gintval)
8616{
Amir Patelffe9a862019-02-28 14:13:12 +05308617 uint32_t rix;
8618
Surya Prakash Raajen3a01bdd2019-02-19 13:19:36 +05308619 return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
Amir Patelffe9a862019-02-28 14:13:12 +05308620 (uint8_t)preamb, 1, &rix);
Surya Prakash Raajen3a01bdd2019-02-19 13:19:36 +05308621}
8622#else
8623static int dp_txrx_get_ratekbps(int preamb, int mcs,
8624 int htflag, int gintval)
8625{
8626 return 0;
8627}
8628#endif
8629
Amir Patel756d05e2018-10-10 12:35:30 +05308630/* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
8631 * @peer_handle: DP pdev handle
8632 *
8633 * return : cdp_pdev_stats pointer
8634 */
8635static struct cdp_pdev_stats*
8636dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
8637{
8638 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8639
8640 dp_aggregate_pdev_stats(pdev);
8641
8642 return &pdev->stats;
8643}
8644
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05308645/* dp_txrx_get_peer_stats - will return cdp_peer_stats
8646 * @peer_handle: DP_PEER handle
8647 *
8648 * return : cdp_peer_stats pointer
8649 */
8650static struct cdp_peer_stats*
8651 dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
8652{
8653 struct dp_peer *peer = (struct dp_peer *)peer_handle;
8654
8655 qdf_assert(peer);
8656
8657 return &peer->stats;
8658}
8659
8660/* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8661 * @peer_handle: DP_PEER handle
8662 *
8663 * return : void
8664 */
8665static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
8666{
8667 struct dp_peer *peer = (struct dp_peer *)peer_handle;
8668
8669 qdf_assert(peer);
8670
hangtianfe681a52019-01-16 17:16:28 +08008671 qdf_mem_zero(&peer->stats, sizeof(peer->stats));
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05308672}
8673
8674/* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
8675 * @vdev_handle: DP_VDEV handle
8676 * @buf: buffer for vdev stats
8677 *
8678 * return : int
8679 */
8680static int dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
8681 bool is_aggregate)
8682{
8683 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
Amir Patel17b91782019-01-08 12:17:15 +05308684 struct cdp_vdev_stats *vdev_stats;
8685 struct dp_pdev *pdev;
8686 struct dp_soc *soc;
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05308687
Amir Patel17b91782019-01-08 12:17:15 +05308688 if (!vdev)
8689 return 1;
8690
8691 pdev = vdev->pdev;
8692 if (!pdev)
8693 return 1;
8694
8695 soc = pdev->soc;
8696 vdev_stats = (struct cdp_vdev_stats *)buf;
8697
8698 if (is_aggregate) {
8699 qdf_spin_lock_bh(&soc->peer_ref_mutex);
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05308700 dp_aggregate_vdev_stats(vdev, buf);
Amir Patel17b91782019-01-08 12:17:15 +05308701 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
8702 } else {
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05308703 qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
Amir Patel17b91782019-01-08 12:17:15 +05308704 }
Ruchi, Agrawal2a6e6142018-06-01 18:47:55 +05308705
8706 return 0;
8707}
8708
Prathyusha Guduri184b6402018-02-04 23:01:49 +05308709/*
Pranita Solanke92096e42018-09-11 11:14:51 +05308710 * dp_get_total_per(): get total per
8711 * @pdev_handle: DP_PDEV handle
8712 *
8713 * Return: % error rate using retries per packet and success packets
8714 */
8715static int dp_get_total_per(struct cdp_pdev *pdev_handle)
8716{
8717 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8718
8719 dp_aggregate_pdev_stats(pdev);
8720 if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
8721 return 0;
8722 return ((pdev->stats.tx.retries * 100) /
8723 ((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
8724}
8725
/*
 * dp_txrx_stats_publish(): publish pdev stats into a buffer
 * @pdev_handle: DP_PDEV handle
 * @buf: to hold pdev_stats
 *
 * Return: int
 */
static int
dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
	struct cdp_txrx_stats_req req = {0,};

	/* Aggregate host-side counters first, then refresh the FW-side
	 * counters via two HTT requests (TX then RX) before copying out.
	 */
	dp_aggregate_pdev_stats(pdev);
	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
	req.cookie_val = 1;
	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				  req.param1, req.param2, req.param3, 0,
				  req.cookie_val, 0);

	/* FW response is asynchronous; sleep to let it update pdev->stats.
	 * NOTE(review): this is a best-effort wait, not a completion event.
	 */
	msleep(DP_MAX_SLEEP_TIME);

	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
	req.cookie_val = 1;
	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				  req.param1, req.param2, req.param3, 0,
				  req.cookie_val, 0);

	msleep(DP_MAX_SLEEP_TIME);
	/* Snapshot the (hopefully updated) stats into the caller's buffer */
	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));

	return TXRX_STATS_LEVEL;
}
8760
Ishank Jain949674c2017-02-27 17:09:29 +05308761/**
8762 * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
8763 * @pdev: DP_PDEV handle
8764 * @map_id: ID of map that needs to be updated
8765 * @tos: index value in map
8766 * @tid: tid value passed by the user
8767 *
8768 * Return: void
8769 */
8770static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
8771 uint8_t map_id, uint8_t tos, uint8_t tid)
8772{
8773 uint8_t dscp;
8774 struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05308775 struct dp_soc *soc = pdev->soc;
8776
8777 if (!soc)
8778 return;
8779
Ishank Jain949674c2017-02-27 17:09:29 +05308780 dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
8781 pdev->dscp_tid_map[map_id][dscp] = tid;
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +05308782
8783 if (map_id < soc->num_hw_dscp_tid_map)
8784 hal_tx_update_dscp_tid(soc->hal_soc, tid,
8785 map_id, dscp);
Ishank Jain949674c2017-02-27 17:09:29 +05308786 return;
8787}
8788
Ishank Jain6290a3c2017-03-21 10:49:39 +05308789/**
Shashikala Prabhu8f6703b2018-10-31 09:43:00 +05308790 * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
8791 * @pdev_handle: pdev handle
8792 * @val: hmmc-dscp flag value
8793 *
8794 * Return: void
8795 */
8796static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
8797 bool val)
8798{
8799 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8800
8801 pdev->hmmc_tid_override_en = val;
8802}
8803
8804/**
8805 * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
8806 * @pdev_handle: pdev handle
8807 * @tid: tid value
8808 *
8809 * Return: void
8810 */
8811static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
8812 uint8_t tid)
8813{
8814 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8815
8816 pdev->hmmc_tid = tid;
8817}
8818
8819/**
Ishank Jain6290a3c2017-03-21 10:49:39 +05308820 * dp_fw_stats_process(): Process TxRX FW stats request
8821 * @vdev_handle: DP VDEV handle
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05308822 * @req: stats request
Ishank Jain6290a3c2017-03-21 10:49:39 +05308823 *
8824 * return: int
8825 */
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05308826static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
8827 struct cdp_txrx_stats_req *req)
Ishank Jain6290a3c2017-03-21 10:49:39 +05308828{
8829 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8830 struct dp_pdev *pdev = NULL;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05308831 uint32_t stats = req->stats;
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07008832 uint8_t mac_id = req->mac_id;
Ishank Jain6290a3c2017-03-21 10:49:39 +05308833
8834 if (!vdev) {
8835 DP_TRACE(NONE, "VDEV not found");
8836 return 1;
8837 }
Ishank Jain6290a3c2017-03-21 10:49:39 +05308838 pdev = vdev->pdev;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05308839
chenguocda25122018-01-24 17:39:38 +08008840 /*
8841 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
8842 * from param0 to param3 according to below rule:
8843 *
8844 * PARAM:
8845 * - config_param0 : start_offset (stats type)
8846 * - config_param1 : stats bmask from start offset
8847 * - config_param2 : stats bmask from start offset + 32
8848 * - config_param3 : stats bmask from start offset + 64
8849 */
8850 if (req->stats == CDP_TXRX_STATS_0) {
8851 req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
8852 req->param1 = 0xFFFFFFFF;
8853 req->param2 = 0xFFFFFFFF;
8854 req->param3 = 0xFFFFFFFF;
Chaithanya Garrepalli32fcc2a2018-08-03 15:09:42 +05308855 } else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
8856 req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
chenguocda25122018-01-24 17:39:38 +08008857 }
8858
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05308859 return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08008860 req->param1, req->param2, req->param3,
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07008861 0, 0, mac_id);
Ishank Jain6290a3c2017-03-21 10:49:39 +05308862}
8863
/**
 * dp_txrx_stats_request - function to map to firmware and host stats
 * @vdev: virtual handle
 * @req: stats request
 *
 * Return: QDF_STATUS
 */
static
QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
				 struct cdp_txrx_stats_req *req)
{
	int host_stats;
	int fw_stats;
	enum cdp_stats stats;
	int num_stats;

	if (!vdev || !req) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid vdev/req instance");
		return QDF_STATUS_E_INVAL;
	}

	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
		dp_err("Invalid mac id request");
		return QDF_STATUS_E_INVAL;
	}

	stats = req->stats;
	if (stats >= CDP_TXRX_MAX_STATS)
		return QDF_STATUS_E_INVAL;

	/*
	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
	 * has to be updated if new FW HTT stats added
	 */
	/* Remap host-range stat IDs past the HTT window into the dense
	 * mapping-table index space; order matters: this must happen
	 * after the CDP_TXRX_MAX_STATS check and before table lookup.
	 */
	if (stats > CDP_TXRX_STATS_HTT_MAX)
		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;

	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);

	/* Remapped index must still fall inside the mapping table */
	if (stats >= num_stats) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid stats option: %d", __func__, stats);
		return QDF_STATUS_E_INVAL;
	}

	req->stats = stats;
	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
	host_stats = dp_stats_mapping_table[stats][STATS_HOST];

	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
		stats, fw_stats, host_stats);

	/* FW-backed stat: forward to firmware and return its status */
	if (fw_stats != TXRX_FW_STATS_INVALID) {
		/* update request with FW stats type */
		req->stats = fw_stats;
		return dp_fw_stats_process(vdev, req);
	}

	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
	    (host_stats <= TXRX_HOST_STATS_MAX))
		return dp_print_host_stats(vdev, req);
	else
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "Wrong Input for TxRx Stats");

	return QDF_STATUS_SUCCESS;
}
8932
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08008933/*
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07008934 * dp_print_per_ring_stats(): Packet count per ring
8935 * @soc - soc handle
8936 */
8937static void dp_print_per_ring_stats(struct dp_soc *soc)
8938{
chenguo8107b662017-12-13 16:31:13 +08008939 uint8_t ring;
8940 uint16_t core;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07008941 uint64_t total_packets;
8942
Venkata Sharath Chandra Manchala7f30b272018-08-22 16:04:19 -07008943 DP_TRACE_STATS(INFO_HIGH, "Reo packets per ring:");
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07008944 for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
8945 total_packets = 0;
Venkata Sharath Chandra Manchala7f30b272018-08-22 16:04:19 -07008946 DP_TRACE_STATS(INFO_HIGH,
8947 "Packets on ring %u:", ring);
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07008948 for (core = 0; core < NR_CPUS; core++) {
Venkata Sharath Chandra Manchala7f30b272018-08-22 16:04:19 -07008949 DP_TRACE_STATS(INFO_HIGH,
8950 "Packets arriving on core %u: %llu",
8951 core,
8952 soc->stats.rx.ring_packets[core][ring]);
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07008953 total_packets += soc->stats.rx.ring_packets[core][ring];
8954 }
Venkata Sharath Chandra Manchala7f30b272018-08-22 16:04:19 -07008955 DP_TRACE_STATS(INFO_HIGH,
8956 "Total packets on ring %u: %llu",
8957 ring, total_packets);
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07008958 }
8959}
8960
/*
 * dp_txrx_path_stats() - Function to display dump stats
 * @soc - soc handle
 *
 * return: none
 */
static void dp_txrx_path_stats(struct dp_soc *soc)
{
	uint8_t error_code;
	uint8_t loop_pdev;
	struct dp_pdev *pdev;
	uint8_t i;

	if (!soc) {
		DP_TRACE(ERROR, "%s: Invalid access",
			 __func__);
		return;
	}

	/* Dump TX, RX, REO-error and histogram counters for every pdev */
	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {

		pdev = soc->pdev_list[loop_pdev];
		/* Refresh aggregated counters before printing */
		dp_aggregate_pdev_stats(pdev);
		DP_TRACE_STATS(INFO_HIGH, "Tx path Statistics:");
		DP_TRACE_STATS(INFO_HIGH, "from stack: %u msdus (%llu bytes)",
			       pdev->stats.tx_i.rcvd.num,
			       pdev->stats.tx_i.rcvd.bytes);
		DP_TRACE_STATS(INFO_HIGH,
			       "processed from host: %u msdus (%llu bytes)",
			       pdev->stats.tx_i.processed.num,
			       pdev->stats.tx_i.processed.bytes);
		DP_TRACE_STATS(INFO_HIGH,
			       "successfully transmitted: %u msdus (%llu bytes)",
			       pdev->stats.tx.tx_success.num,
			       pdev->stats.tx.tx_success.bytes);

		/* Host-side drop reasons */
		DP_TRACE_STATS(INFO_HIGH, "Dropped in host:");
		DP_TRACE_STATS(INFO_HIGH, "Total packets dropped: %u,",
			       pdev->stats.tx_i.dropped.dropped_pkt.num);
		DP_TRACE_STATS(INFO_HIGH, "Descriptor not available: %u",
			       pdev->stats.tx_i.dropped.desc_na.num);
		DP_TRACE_STATS(INFO_HIGH, "Ring full: %u",
			       pdev->stats.tx_i.dropped.ring_full);
		DP_TRACE_STATS(INFO_HIGH, "Enqueue fail: %u",
			       pdev->stats.tx_i.dropped.enqueue_fail);
		DP_TRACE_STATS(INFO_HIGH, "DMA Error: %u",
			       pdev->stats.tx_i.dropped.dma_error);

		/* HW/FW-side drop reasons */
		DP_TRACE_STATS(INFO_HIGH, "Dropped in hardware:");
		DP_TRACE_STATS(INFO_HIGH, "total packets dropped: %u",
			       pdev->stats.tx.tx_failed);
		DP_TRACE_STATS(INFO_HIGH, "mpdu age out: %u",
			       pdev->stats.tx.dropped.age_out);
		DP_TRACE_STATS(INFO_HIGH, "firmware removed packets: %u",
			       pdev->stats.tx.dropped.fw_rem.num);
		DP_TRACE_STATS(INFO_HIGH, "firmware removed bytes: %llu",
			       pdev->stats.tx.dropped.fw_rem.bytes);
		DP_TRACE_STATS(INFO_HIGH, "firmware removed tx: %u",
			       pdev->stats.tx.dropped.fw_rem_tx);
		DP_TRACE_STATS(INFO_HIGH, "firmware removed notx %u",
			       pdev->stats.tx.dropped.fw_rem_notx);
		DP_TRACE_STATS(INFO_HIGH, "Invalid peer on tx path: %u",
			       pdev->soc->stats.tx.tx_invalid_peer.num);

		/* TX completion batching histogram */
		DP_TRACE_STATS(INFO_HIGH, "Tx packets sent per interrupt:");
		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
			       pdev->stats.tx_comp_histogram.pkts_1);
		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_2_20);
		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_21_40);
		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_41_60);
		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_61_80);
		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_81_100);
		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_101_200);
		DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
			       pdev->stats.tx_comp_histogram.pkts_201_plus);

		DP_TRACE_STATS(INFO_HIGH, "Rx path statistics");

		DP_TRACE_STATS(INFO_HIGH,
			       "delivered %u msdus ( %llu bytes),",
			       pdev->stats.rx.to_stack.num,
			       pdev->stats.rx.to_stack.bytes);
		for (i = 0; i < CDP_MAX_RX_RINGS; i++)
			DP_TRACE_STATS(INFO_HIGH,
				       "received on reo[%d] %u msdus( %llu bytes),",
				       i, pdev->stats.rx.rcvd_reo[i].num,
				       pdev->stats.rx.rcvd_reo[i].bytes);
		DP_TRACE_STATS(INFO_HIGH,
			       "intra-bss packets %u msdus ( %llu bytes),",
			       pdev->stats.rx.intra_bss.pkts.num,
			       pdev->stats.rx.intra_bss.pkts.bytes);
		DP_TRACE_STATS(INFO_HIGH,
			       "intra-bss fails %u msdus ( %llu bytes),",
			       pdev->stats.rx.intra_bss.fail.num,
			       pdev->stats.rx.intra_bss.fail.bytes);
		DP_TRACE_STATS(INFO_HIGH,
			       "raw packets %u msdus ( %llu bytes),",
			       pdev->stats.rx.raw.num,
			       pdev->stats.rx.raw.bytes);
		DP_TRACE_STATS(INFO_HIGH, "mic errors %u",
			       pdev->stats.rx.err.mic_err);
		DP_TRACE_STATS(INFO_HIGH, "Invalid peer on rx path: %u",
			       pdev->soc->stats.rx.err.rx_invalid_peer.num);
		DP_TRACE_STATS(INFO_HIGH, "sw_peer_id invalid %u",
			       pdev->soc->stats.rx.err.rx_invalid_peer_id.num);
		DP_TRACE_STATS(INFO_HIGH, "packet_len invalid %u",
			       pdev->soc->stats.rx.err.rx_invalid_pkt_len.num);


		DP_TRACE_STATS(INFO_HIGH, "Reo Statistics");
		DP_TRACE_STATS(INFO_HIGH, "rbm error: %u msdus",
			       pdev->soc->stats.rx.err.invalid_rbm);
		DP_TRACE_STATS(INFO_HIGH, "hal ring access fail: %u msdus",
			       pdev->soc->stats.rx.err.hal_ring_access_fail);

		/* Only non-zero REO error buckets are printed */
		for (error_code = 0; error_code < HAL_REO_ERR_MAX;
				error_code++) {
			if (!pdev->soc->stats.rx.err.reo_error[error_code])
				continue;
			DP_TRACE_STATS(INFO_HIGH,
				       "Reo error number (%u): %u msdus",
				       error_code,
				       pdev->soc->stats.rx.err
				       .reo_error[error_code]);
		}

		/* Only non-zero RXDMA error buckets are printed */
		for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
				error_code++) {
			if (!pdev->soc->stats.rx.err.rxdma_error[error_code])
				continue;
			DP_TRACE_STATS(INFO_HIGH,
				       "Rxdma error number (%u): %u msdus",
				       error_code,
				       pdev->soc->stats.rx.err
				       .rxdma_error[error_code]);
		}

		/* RX reap batching histogram */
		DP_TRACE_STATS(INFO_HIGH, "Rx packets reaped per interrupt:");
		DP_TRACE_STATS(INFO_HIGH, "Single Packet: %u",
			       pdev->stats.rx_ind_histogram.pkts_1);
		DP_TRACE_STATS(INFO_HIGH, "2-20 Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_2_20);
		DP_TRACE_STATS(INFO_HIGH, "21-40 Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_21_40);
		DP_TRACE_STATS(INFO_HIGH, "41-60 Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_41_60);
		DP_TRACE_STATS(INFO_HIGH, "61-80 Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_61_80);
		DP_TRACE_STATS(INFO_HIGH, "81-100 Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_81_100);
		DP_TRACE_STATS(INFO_HIGH, "101-200 Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_101_200);
		DP_TRACE_STATS(INFO_HIGH, " 201+ Packets: %u",
			       pdev->stats.rx_ind_histogram.pkts_201_plus);

		/* Compile-time/ini feature configuration snapshot */
		DP_TRACE_STATS(INFO_HIGH, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
			       __func__,
			       pdev->soc->wlan_cfg_ctx
			       ->tso_enabled,
			       pdev->soc->wlan_cfg_ctx
			       ->lro_enabled,
			       pdev->soc->wlan_cfg_ctx
			       ->rx_hash,
			       pdev->soc->wlan_cfg_ctx
			       ->napi_enabled);
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
		DP_TRACE_STATS(INFO_HIGH, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
			       __func__,
			       pdev->soc->wlan_cfg_ctx
			       ->tx_flow_stop_queue_threshold,
			       pdev->soc->wlan_cfg_ctx
			       ->tx_flow_start_queue_offset);
#endif
	}
}
9142
9143/*
9144 * dp_txrx_dump_stats() - Dump statistics
9145 * @value - Statistics option
9146 */
Mohit Khanna90d7ebd2017-09-12 21:54:21 -07009147static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
9148 enum qdf_stats_verbosity_level level)
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08009149{
9150 struct dp_soc *soc =
9151 (struct dp_soc *)psoc;
9152 QDF_STATUS status = QDF_STATUS_SUCCESS;
9153
9154 if (!soc) {
9155 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9156 "%s: soc is NULL", __func__);
9157 return QDF_STATUS_E_INVAL;
9158 }
9159
9160 switch (value) {
9161 case CDP_TXRX_PATH_STATS:
9162 dp_txrx_path_stats(soc);
9163 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07009164
9165 case CDP_RX_RING_STATS:
9166 dp_print_per_ring_stats(soc);
9167 break;
9168
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08009169 case CDP_TXRX_TSO_STATS:
9170 /* TODO: NOT IMPLEMENTED */
9171 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07009172
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08009173 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07009174 cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08009175 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07009176
psimha61b1a362017-07-27 15:45:49 -07009177 case CDP_DP_NAPI_STATS:
9178 dp_print_napi_stats(soc);
9179 break;
9180
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08009181 case CDP_TXRX_DESC_STATS:
9182 /* TODO: NOT IMPLEMENTED */
9183 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07009184
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08009185 default:
9186 status = QDF_STATUS_E_INVAL;
9187 break;
9188 }
9189
9190 return status;
9191
9192}
9193
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_update_flow_control_parameters() - API to store datapath
 *				flow control config parameters
 * @soc: soc handle
 * @params: ini parameter handle
 *
 * Return: void
 */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				       struct cdp_config_params *params)
{
	/* Copy the TX flow-control thresholds from the ini params
	 * into the soc's wlan cfg context.
	 */
	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
					params->tx_flow_stop_queue_threshold;
	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
					params->tx_flow_start_queue_offset;
}
#else
/* No-op stub when TX flow control v2 is not compiled in */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				       struct cdp_config_params *params)
{
}
#endif
9219
9220/**
9221 * dp_update_config_parameters() - API to store datapath
9222 * config parameters
9223 * @soc: soc handle
9224 * @cfg: ini parameter handle
9225 *
9226 * Return: status
9227 */
9228static
9229QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
9230 struct cdp_config_params *params)
9231{
9232 struct dp_soc *soc = (struct dp_soc *)psoc;
9233
9234 if (!(soc)) {
9235 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9236 "%s: Invalid handle", __func__);
9237 return QDF_STATUS_E_INVAL;
9238 }
9239
9240 soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
9241 soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
9242 soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
9243 soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
9244 params->tcp_udp_checksumoffload;
9245 soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
Mohit Khanna81179cb2018-08-16 20:50:43 -07009246 soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
Mohit Khanna16816ae2018-10-30 14:12:03 -07009247 soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
Mohit Khanna81179cb2018-08-16 20:50:43 -07009248
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07009249 dp_update_flow_control_parameters(soc, params);
9250
9251 return QDF_STATUS_SUCCESS;
9252}
9253
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05309254/**
9255 * dp_txrx_set_wds_rx_policy() - API to store datapath
9256 * config parameters
9257 * @vdev_handle - datapath vdev handle
9258 * @cfg: ini parameter handle
9259 *
9260 * Return: status
9261 */
9262#ifdef WDS_VENDOR_EXTENSION
9263void
9264dp_txrx_set_wds_rx_policy(
9265 struct cdp_vdev *vdev_handle,
9266 u_int32_t val)
9267{
9268 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9269 struct dp_peer *peer;
9270 if (vdev->opmode == wlan_op_mode_ap) {
9271 /* for ap, set it on bss_peer */
9272 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
9273 if (peer->bss_peer) {
9274 peer->wds_ecm.wds_rx_filter = 1;
9275 peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
9276 peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
9277 break;
9278 }
9279 }
9280 } else if (vdev->opmode == wlan_op_mode_sta) {
9281 peer = TAILQ_FIRST(&vdev->peer_list);
9282 peer->wds_ecm.wds_rx_filter = 1;
9283 peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
9284 peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
9285 }
9286}
9287
/**
 * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
 *
 * @peer_handle - datapath peer handle
 * @wds_tx_ucast: policy for unicast transmission
 * @wds_tx_mcast: policy for multicast transmission
 *
 * Return: void
 */
void
dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
		int wds_tx_ucast, int wds_tx_mcast)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	/* WDS is enabled iff either TX policy is non-zero; both 4-address
	 * flags are cleared together when disabling.
	 */
	if (wds_tx_ucast || wds_tx_mcast) {
		peer->wds_enabled = 1;
		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
	} else {
		peer->wds_enabled = 0;
		peer->wds_ecm.wds_tx_ucast_4addr = 0;
		peer->wds_ecm.wds_tx_mcast_4addr = 0;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  FL("Policy Update set to :\
		peer->wds_enabled %d\
		peer->wds_ecm.wds_tx_ucast_4addr %d\
		peer->wds_ecm.wds_tx_mcast_4addr %d"),
		  peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
		  peer->wds_ecm.wds_tx_mcast_4addr);
	return;
}
#endif
9322
/* CDP WDS ops table exported to the converged datapath layer */
static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
#ifdef WDS_VENDOR_EXTENSION
	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
#endif
};
9330
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05309331/*
Kabilan Kannan60e3b302017-09-07 20:06:17 -07009332 * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
9333 * @vdev_handle - datapath vdev handle
9334 * @callback - callback function
9335 * @ctxt: callback context
9336 *
9337 */
9338static void
9339dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
9340 ol_txrx_data_tx_cb callback, void *ctxt)
9341{
9342 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9343
9344 vdev->tx_non_std_data_callback.func = callback;
9345 vdev->tx_non_std_data_callback.ctxt = ctxt;
9346}
9347
Santosh Anbu2280e862018-01-03 22:25:53 +05309348/**
9349 * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
9350 * @pdev_hdl: datapath pdev handle
9351 *
9352 * Return: opaque pointer to dp txrx handle
9353 */
9354static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
9355{
9356 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
9357
9358 return pdev->dp_txrx_handle;
9359}
9360
9361/**
9362 * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
9363 * @pdev_hdl: datapath pdev handle
9364 * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
9365 *
9366 * Return: void
9367 */
9368static void
9369dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
9370{
9371 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
9372
9373 pdev->dp_txrx_handle = dp_txrx_hdl;
9374}
9375
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +05309376/**
9377 * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
9378 * @soc_handle: datapath soc handle
9379 *
9380 * Return: opaque pointer to external dp (non-core DP)
9381 */
9382static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
9383{
9384 struct dp_soc *soc = (struct dp_soc *)soc_handle;
9385
9386 return soc->external_txrx_handle;
9387}
9388
9389/**
9390 * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
9391 * @soc_handle: datapath soc handle
9392 * @txrx_handle: opaque pointer to external dp (non-core DP)
9393 *
9394 * Return: void
9395 */
9396static void
9397dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
9398{
9399 struct dp_soc *soc = (struct dp_soc *)soc_handle;
9400
9401 soc->external_txrx_handle = txrx_handle;
9402}
9403
/**
 * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
 * @pdev_hdl: datapath pdev handle
 * @lmac_id: lmac id
 *
 * Records the lmac id on the pdev and mirrors the mapping into the
 * wlan_cfg hw-macid table.
 *
 * Return: void
 */
static void
dp_soc_map_pdev_to_lmac(struct cdp_pdev *pdev_hdl, uint32_t lmac_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
	struct dp_soc *soc = pdev->soc;

	pdev->lmac_id = lmac_id;
	/* NOTE(review): wlan_cfg appears to keep hw mac ids 1-based, hence
	 * the (lmac_id + 1) — confirm against wlan_cfg_set_hw_macid users.
	 */
	wlan_cfg_set_hw_macid(soc->wlan_cfg_ctx,
			      pdev->pdev_id,
			      (lmac_id + 1));
}
9422
9423/**
Akshay Kosigia4f6e172018-09-03 21:42:27 +05309424 * dp_get_cfg_capabilities() - get dp capabilities
9425 * @soc_handle: datapath soc handle
9426 * @dp_caps: enum for dp capabilities
9427 *
9428 * Return: bool to determine if dp caps is enabled
9429 */
9430static bool
9431dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
9432 enum cdp_capabilities dp_caps)
9433{
9434 struct dp_soc *soc = (struct dp_soc *)soc_handle;
9435
9436 return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
9437}
9438
#ifdef FEATURE_AST
/**
 * dp_peer_teardown_wifi3() - mark peer for deletion and flush its AST entries
 * @vdev_hdl: datapath vdev handle the peer belongs to
 * @peer_hdl: datapath peer handle being torn down
 *
 * Return: void
 */
static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;

	/*
	 * For BSS peer, new peer is not created on alloc_node if the
	 * peer with same address already exists , instead refcnt is
	 * increased for existing peer. Correspondingly in delete path,
	 * only refcnt is decreased; and peer is only deleted , when all
	 * references are deleted. So delete_in_progress should not be set
	 * for bss_peer, unless only 2 reference remains (peer map reference
	 * and peer hash table reference).
	 */
	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
		return;
	}

	/* flag update and AST flush are done together under ast_lock */
	qdf_spin_lock_bh(&soc->ast_lock);
	peer->delete_in_progress = true;
	dp_peer_delete_ast_entries(soc, peer);
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#endif
9465
#ifdef ATH_SUPPORT_NAC_RSSI
/**
 * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
 * @vdev_hdl: DP vdev handle
 * @mac_addr: mac address of the neighbour peer to look up
 * @rssi: out param; set to the last recorded rssi of the matching
 *        neighbour, or 0 when no match is found
 *
 * Return: QDF_STATUS_SUCCESS if @mac_addr is found in the pdev
 *         neighbour peer list, QDF_STATUS_E_FAILURE otherwise
 */
static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
					     char *mac_addr,
					     uint8_t *rssi)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_neighbour_peer *peer = NULL;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	*rssi = 0;
	/* the neighbour list is lock-protected; traverse it under
	 * neighbour_peer_mutex
	 */
	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
			*rssi = peer->rssi;
			status = QDF_STATUS_SUCCESS;
			break;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
	return status;
}
9497
/**
 * dp_config_for_nac_rssi() - add/remove a NAC (neighbour peer) for RSSI
 * monitoring and inform firmware of the bssid
 * @vdev_handle: DP vdev handle
 * @cmd: CDP_NAC_PARAM_ADD or CDP_NAC_PARAM_DEL
 * @bssid: bssid forwarded to firmware via the config_bssid callback
 * @client_macaddr: neighbour client mac added to / removed from the
 *                  filter list
 * @chan_num: channel number (not used in this function)
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
		uint8_t chan_num)
{

	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;

	/* NOTE(review): filtering flag is set even for the DEL command;
	 * confirm whether it should be cleared once the last NAC is removed.
	 */
	pdev->nac_rssi_filtering = 1;
	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */

	if (cmd == CDP_NAC_PARAM_ADD) {
		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
						 client_macaddr);
	} else if (cmd == CDP_NAC_PARAM_DEL) {
		dp_update_filter_neighbour_peers(vdev_handle,
						 DP_NAC_PARAM_DEL,
						 client_macaddr);
	}

	/* pass the bssid down to FW when the callback is registered */
	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
			((void *)vdev->pdev->ctrl_pdev,
			 vdev->vdev_id, cmd, bssid);

	return QDF_STATUS_SUCCESS;
}
#endif
9529
/**
 * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
 * for pktlog
 * @txrx_pdev_handle: cdp_pdev handle
 * @mac_addr: mac address of the peer to filter on
 * @enb_dsb: Enable or disable peer based filtering
 *
 * Return: QDF_STATUS_SUCCESS / QDF_STATUS_E_FAILURE values (declared as
 *         int to match the cdp ops signature)
 */
static int
dp_enable_peer_based_pktlog(
	struct cdp_pdev *txrx_pdev_handle,
	char *mac_addr, uint8_t enb_dsb)
{
	struct dp_peer *peer;
	uint8_t local_id;	/* out param required by the lookup; unused here */
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;

	peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
						      mac_addr, &local_id);

	if (!peer) {
		dp_err("Invalid Peer");
		return QDF_STATUS_E_FAILURE;
	}

	/* both the per-peer flag and the pdev-wide flag track the setting */
	peer->peer_based_pktlog_filter = enb_dsb;
	pdev->dp_peer_based_pktlog = enb_dsb;

	return QDF_STATUS_SUCCESS;
}
9560
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
/**
 * dp_summarize_tag_stats - sums up the given protocol type's counters
 * across all the rings and dumps the same
 * @pdev_handle: cdp_pdev handle
 * @protocol_type: protocol type for which stats should be displayed
 *
 * Return: total tagged-MSDU count for @protocol_type summed across all
 *         REO destination rings plus the rx-error path
 */
static uint64_t dp_summarize_tag_stats(struct cdp_pdev *pdev_handle,
				       uint16_t protocol_type)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	uint8_t ring_idx;
	uint64_t total_tag_cnt = 0;

	/* accumulate the per-REO-ring counters ... */
	for (ring_idx = 0; ring_idx < MAX_REO_DEST_RINGS; ring_idx++) {
		total_tag_cnt +=
			pdev->reo_proto_tag_stats[ring_idx][protocol_type].tag_ctr;
	}
	/* ... plus the rx-error path counter */
	total_tag_cnt += pdev->rx_err_proto_tag_stats[protocol_type].tag_ctr;
	DP_PRINT_STATS("ProtoID: %d, Tag: %u Tagged MSDU cnt: %llu",
		       protocol_type,
		       pdev->rx_proto_tag_map[protocol_type].tag,
		       total_tag_cnt);
	return total_tag_cnt;
}
9589
/**
 * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
 * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
 * @pdev_handle: cdp_pdev handle
 * @protocol_type: protocol type for which stats should be displayed
 *
 * Return: none
 */
static void
dp_dump_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
				   uint16_t protocol_type)
{
	uint16_t proto_idx;

	/* reject anything that is neither a valid protocol nor "ALL" */
	if (protocol_type != RX_PROTOCOL_TAG_ALL &&
	    protocol_type >= RX_PROTOCOL_TAG_MAX) {
		DP_PRINT_STATS("Invalid protocol type : %u", protocol_type);
		return;
	}

	/* protocol_type in [0 ... RX_PROTOCOL_TAG_MAX] */
	if (protocol_type != RX_PROTOCOL_TAG_ALL) {
		dp_summarize_tag_stats(pdev_handle, protocol_type);
		return;
	}

	/* protocol_type == RX_PROTOCOL_TAG_ALL */
	for (proto_idx = 0; proto_idx < RX_PROTOCOL_TAG_MAX; proto_idx++)
		dp_summarize_tag_stats(pdev_handle, proto_idx);
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9621
/**
 * dp_reset_pdev_rx_protocol_tag_stats - resets the stats counters for
 * given protocol type
 * @pdev_handle: cdp_pdev handle
 * @protocol_type: protocol type for which stats should be reset
 *
 * Return: none
 */
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
static void
dp_reset_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
				    uint16_t protocol_type)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	uint8_t ring_idx;

	/* clear every per-REO-ring counter and the rx-error path counter */
	for (ring_idx = 0; ring_idx < MAX_REO_DEST_RINGS; ring_idx++)
		pdev->reo_proto_tag_stats[ring_idx][protocol_type].tag_ctr = 0;
	pdev->rx_err_proto_tag_stats[protocol_type].tag_ctr = 0;
}
#else
/* no-op variant when tag statistics are compiled out */
static void
dp_reset_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
				    uint16_t protocol_type)
{
	/** Stub API */
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9650
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07009651/**
9652 * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
9653 * applied to the desired protocol type packets
9654 * @txrx_pdev_handle: cdp_pdev handle
9655 * @enable_rx_protocol_tag - bitmask that indicates what protocol types
9656 * are enabled for tagging. zero indicates disable feature, non-zero indicates
9657 * enable feature
9658 * @protocol_type: new protocol type for which the tag is being added
9659 * @tag: user configured tag for the new protocol
9660 *
9661 * Return: QDF_STATUS
9662 */
9663static QDF_STATUS
9664dp_update_pdev_rx_protocol_tag(struct cdp_pdev *pdev_handle,
9665 uint32_t enable_rx_protocol_tag,
9666 uint16_t protocol_type,
9667 uint16_t tag)
9668{
9669 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9670 /*
9671 * dynamically enable/disable tagging based on enable_rx_protocol_tag
9672 * flag.
9673 */
9674 if (enable_rx_protocol_tag) {
9675 /* Tagging for one or more protocols has been set by user */
Karunakar Dasinenid8c7ad22019-04-18 18:15:02 -07009676 pdev->is_rx_protocol_tagging_enabled = true;
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07009677 } else {
9678 /*
9679 * No protocols being tagged, disable feature till next add
9680 * operation
9681 */
Karunakar Dasinenid8c7ad22019-04-18 18:15:02 -07009682 pdev->is_rx_protocol_tagging_enabled = false;
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07009683 }
9684
Karunakar Dasinenid8c7ad22019-04-18 18:15:02 -07009685 /** Reset stats counter across all rings for given protocol */
9686 dp_reset_pdev_rx_protocol_tag_stats(pdev_handle, protocol_type);
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07009687
9688 pdev->rx_proto_tag_map[protocol_type].tag = tag;
9689
9690 return QDF_STATUS_SUCCESS;
9691}
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -07009692#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
9693
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05309694static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05309695 uint32_t max_peers,
Tallapragada Kalyana7023622018-12-03 19:29:52 +05309696 uint32_t max_ast_index,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05309697 bool peer_map_unmap_v2)
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05309698{
9699 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9700
9701 soc->max_peers = max_peers;
9702
Tallapragada Kalyana7023622018-12-03 19:29:52 +05309703 qdf_print ("%s max_peers %u, max_ast_index: %u\n",
9704 __func__, max_peers, max_ast_index);
9705 wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05309706
9707 if (dp_peer_find_attach(soc))
9708 return QDF_STATUS_E_FAILURE;
9709
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +05309710 soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
9711
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +05309712 return QDF_STATUS_SUCCESS;
9713}
9714
Sravan Kumar Kairam5a6f5902018-07-04 17:32:24 +05309715/**
9716 * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
9717 * @dp_pdev: dp pdev handle
9718 * @ctrl_pdev: UMAC ctrl pdev handle
9719 *
9720 * Return: void
9721 */
9722static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
9723 struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
9724{
9725 struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
9726
9727 pdev->ctrl_pdev = ctrl_pdev;
9728}
9729
Amir Patel468bded2019-03-21 11:42:31 +05309730static void dp_set_rate_stats_cap(struct cdp_soc_t *soc_hdl,
9731 uint8_t val)
9732{
9733 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9734
9735 soc->wlanstats_enabled = val;
9736}
9737
9738static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
9739 void *stats_ctx)
9740{
9741 struct dp_soc *soc = (struct dp_soc *)soc_handle;
9742
9743 soc->rate_stats_ctx = stats_ctx;
9744}
9745
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
/**
 * dp_flush_rate_stats_req() - raise a rate-stats flush request for every
 * peer on the given pdev
 * @soc_hdl: cdp soc handle (unused; kept for the cdp ops signature)
 * @pdev_hdl: datapath pdev handle
 *
 * Walks every vdev of the pdev and every peer of each vdev, posting a
 * WDI_EVENT_FLUSH_RATE_STATS_REQ event per peer.
 *
 * Return: void
 */
static void dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
				    struct cdp_pdev *pdev_hdl)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
	struct dp_soc *soc = (struct dp_soc *)pdev->soc;
	struct dp_vdev *vdev = NULL;
	struct dp_peer *peer = NULL;

	/* peer_ref_mutex is taken before vdev_list_lock, matching the
	 * acquisition order used elsewhere in this file
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			/* TAILQ_FOREACH never yields NULL inside the loop
			 * body, so the previous "if (peer)" guard was
			 * redundant and has been dropped
			 */
			dp_wdi_event_handler(
				WDI_EVENT_FLUSH_RATE_STATS_REQ,
				pdev->soc, peer->wlanstats_ctx,
				peer->peer_ids[0],
				WDI_NO_VAL, pdev->pdev_id);
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}
#else
static inline void
dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
			struct cdp_pdev *pdev_hdl)
{
}
#endif
9777
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
/**
 * dp_peer_flush_rate_stats() - post a peer rate-stats flush WDI event
 * @soc: cdp soc handle (unused)
 * @pdev_handle: datapath pdev handle
 * @buf: stats payload forwarded with the event
 *
 * Return: void
 */
static void dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
				     struct cdp_pdev *pdev_handle,
				     void *buf)
{
	struct dp_pdev *dp_pdev = (struct dp_pdev *)pdev_handle;

	dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
			     dp_pdev->soc, buf, HTT_INVALID_PEER,
			     WDI_NO_VAL, dp_pdev->pdev_id);
}
#else
static inline void
dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
			 struct cdp_pdev *pdev_handle,
			 void *buf)
{
}
#endif
9797
9798static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
9799{
9800 struct dp_soc *soc = (struct dp_soc *)soc_handle;
9801
9802 return soc->rate_stats_ctx;
9803}
9804
jitiphil60ac9aa2018-10-05 19:54:04 +05309805/*
9806 * dp_get_cfg() - get dp cfg
9807 * @soc: cdp soc handle
9808 * @cfg: cfg enum
9809 *
9810 * Return: cfg value
9811 */
9812static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
9813{
9814 struct dp_soc *dpsoc = (struct dp_soc *)soc;
9815 uint32_t value = 0;
9816
9817 switch (cfg) {
9818 case cfg_dp_enable_data_stall:
9819 value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
9820 break;
9821 case cfg_dp_enable_ip_tcp_udp_checksum_offload:
9822 value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
9823 break;
9824 case cfg_dp_tso_enable:
9825 value = dpsoc->wlan_cfg_ctx->tso_enabled;
9826 break;
9827 case cfg_dp_lro_enable:
9828 value = dpsoc->wlan_cfg_ctx->lro_enabled;
9829 break;
9830 case cfg_dp_gro_enable:
9831 value = dpsoc->wlan_cfg_ctx->gro_enabled;
9832 break;
9833 case cfg_dp_tx_flow_start_queue_offset:
9834 value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
9835 break;
9836 case cfg_dp_tx_flow_stop_queue_threshold:
9837 value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
9838 break;
9839 case cfg_dp_disable_intra_bss_fwd:
9840 value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
9841 break;
9842 default:
9843 value = 0;
9844 }
9845
9846 return value;
9847}
9848
#ifdef CONFIG_WIN
/**
 * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
 * @pdev_handle: datapath pdev handle
 * @param: ol ath params
 * @value: value of the flag
 * @buff: Buffer to be passed; for OL_ATH_PARAM_TOTAL_Q_SIZE a NULL buff
 *        means "set from value", non-NULL means "read into buff"
 *
 * Implemented this function same as legacy function. In legacy code, single
 * function is used to display stats and update pdev params.
 *
 * Return: 0 for success. nonzero for failure.
 */
static uint32_t dp_tx_flow_ctrl_configure_pdev(void *pdev_handle,
					       enum _ol_ath_param_t param,
					       uint32_t value, void *buff)
{
	struct dp_soc *soc = NULL;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if (qdf_unlikely(!pdev))
		return 1;

	soc = pdev->soc;
	if (!soc)
		return 1;

	switch (param) {
	case OL_ATH_PARAM_VIDEO_DELAY_STATS_FC:
		/* value acts as a boolean toggling delay-stats capture */
		if (value)
			pdev->delay_stats_flag = true;
		else
			pdev->delay_stats_flag = false;
		break;
	case OL_ATH_PARAM_VIDEO_STATS_FC:
		/* display-only command: dump TID and delay stats */
		qdf_print("------- TID Stats ------\n");
		dp_pdev_print_tid_stats(pdev);
		qdf_print("------ Delay Stats ------\n");
		dp_pdev_print_delay_stats(pdev);
		break;
	case OL_ATH_PARAM_TOTAL_Q_SIZE:
		{
			uint32_t tx_min, tx_max;

			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

			if (!buff) {
				/* set path: accept only values within the
				 * configured tx descriptor range
				 */
				if ((value >= tx_min) && (value <= tx_max)) {
					pdev->num_tx_allowed = value;
				} else {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_INFO,
						  "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
						  tx_min, tx_max);
					break;
				}
			} else {
				/* get path: report the current limit */
				*(int *)buff = pdev->num_tx_allowed;
			}
		}
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: not handled param %d ", __func__, param);
		break;
	}

	return 0;
}
#endif
9920
Debasis Dasc39a68d2019-01-28 17:02:06 +05309921/**
9922 * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
9923 * @vdev: DP_PDEV handle
9924 * @pcp: pcp value
9925 * @tid: tid value passed by the user
9926 *
9927 * Return: QDF_STATUS_SUCCESS on success
9928 */
9929static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
9930 uint8_t pcp, uint8_t tid)
9931{
9932 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9933 struct dp_soc *soc = pdev->soc;
9934
9935 soc->pcp_tid_map[pcp] = tid;
9936
9937 hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
9938 return QDF_STATUS_SUCCESS;
9939}
9940
9941/**
9942 * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
9943 * @vdev: DP_PDEV handle
9944 * @prio: tidmap priority value passed by the user
9945 *
9946 * Return: QDF_STATUS_SUCCESS on success
9947 */
9948static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct cdp_pdev *pdev_handle,
9949 uint8_t prio)
9950{
9951 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9952 struct dp_soc *soc = pdev->soc;
9953
9954 soc->tidmap_prty = prio;
9955
9956 hal_tx_set_tidmap_prty(soc->hal_soc, prio);
9957 return QDF_STATUS_SUCCESS;
9958}
9959
9960/**
9961 * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
9962 * @vdev: DP_VDEV handle
9963 * @pcp: pcp value
9964 * @tid: tid value passed by the user
9965 *
9966 * Return: QDF_STATUS_SUCCESS on success
9967 */
9968static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
9969 uint8_t pcp, uint8_t tid)
9970{
9971 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9972
9973 vdev->pcp_tid_map[pcp] = tid;
9974
9975 return QDF_STATUS_SUCCESS;
9976}
9977
9978/**
9979 * dp_set_vdev_tidmap_tbl_id_wifi3(): update tidmapi tbl id in vdev
9980 * @vdev: DP_VDEV handle
9981 * @mapid: map_id value passed by the user
9982 *
9983 * Return: QDF_STATUS_SUCCESS on success
9984 */
9985static QDF_STATUS dp_set_vdev_tidmap_tbl_id_wifi3(struct cdp_vdev *vdev_handle,
9986 uint8_t mapid)
9987{
9988 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9989
9990 vdev->tidmap_tbl_id = mapid;
9991
9992 return QDF_STATUS_SUCCESS;
9993}
9994
9995/**
9996 * dp_set_vdev_tidmap_prty_wifi3(): update tidmap priority in vdev
9997 * @vdev: DP_VDEV handle
9998 * @prio: tidmap priority value passed by the user
9999 *
10000 * Return: QDF_STATUS_SUCCESS on success
10001 */
10002static QDF_STATUS dp_set_vdev_tidmap_prty_wifi3(struct cdp_vdev *vdev_handle,
10003 uint8_t prio)
10004{
10005 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
10006
10007 vdev->tidmap_prty = prio;
10008
10009 return QDF_STATUS_SUCCESS;
10010}
10011
/* dp_ops_cmn: common datapath ops table registered with the cdp layer */
static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_delete = dp_peer_delete_wifi3,
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_vdev_flush_peers = dp_vdev_flush_peers,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
	.flush_cache_rx_queue = NULL,
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
	.set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
	.txrx_get_vow_config_frm_pdev = dp_get_delay_stats_flag,
	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
	.txrx_set_nac = dp_set_nac,
	.txrx_get_tx_pending = dp_get_tx_pending,
	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.set_pn_check = dp_set_pn_check_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.tx_send = dp_tx_send,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
	.txrx_get_os_rx_handles_from_vdev =
					dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,

	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_pdev_tidmap_prty = dp_set_pdev_tidmap_prty_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
	.set_vdev_tidmap_prty = dp_set_vdev_tidmap_prty_wifi3,
	.set_vdev_tidmap_tbl_id = dp_set_vdev_tidmap_tbl_id_wifi3,

	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
};
10110
/* dp_ops_ctrl: control-path ops table registered with the cdp layer */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
#ifdef MESH_MODE_SUPPORT
	.txrx_set_mesh_mode = dp_peer_set_mesh_mode,
	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_peer_set_nawds = dp_peer_set_nawds,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
	.txrx_get_sec_type = dp_get_sec_type,
	/* TODO: Add other functions */
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
#ifdef WDI_EVENT_ENABLE
	.txrx_get_pldev = dp_get_pldev,
#endif
	.txrx_set_pdev_param = dp_set_pdev_param,
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
				dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
};
10150
/* dp_ops_me: multicast-enhancement (IQUE) ops; empty unless ATH_SUPPORT_IQUE */
static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
};
10158
/* dp_ops_mon: monitor-mode filter ops; the set_filter hooks are not
 * implemented (NULL), only the getters and mode control are wired up
 */
static struct cdp_mon_ops dp_ops_mon = {
	.txrx_monitor_set_filter_ucast_data = NULL,
	.txrx_monitor_set_filter_mcast_data = NULL,
	.txrx_monitor_set_filter_non_data = NULL,
	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
};
10170
/* dp_ops_host_stats: host statistics ops table registered with the cdp layer */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.configure_rate_stats = dp_set_rate_stats_cap,
	/* TODO */
};
10186
/* RAW (native 802.11) mode ops - not implemented yet */
static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
10190
#ifdef CONFIG_WIN
/* Peak-flow ops; single positional entry: per-pdev TX flow-control config */
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif /* CONFIG_WIN */
10196
Yue Ma245b47b2017-02-21 16:35:31 -080010197#ifdef FEATURE_RUNTIME_PM
10198/**
10199 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
10200 * @opaque_pdev: DP pdev context
10201 *
10202 * DP is ready to runtime suspend if there are no pending TX packets.
10203 *
10204 * Return: QDF_STATUS
10205 */
10206static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
10207{
10208 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
10209 struct dp_soc *soc = pdev->soc;
10210
Yue Maaf4272d2018-08-27 12:35:21 -070010211 /* Abort if there are any pending TX packets */
10212 if (dp_get_tx_pending(opaque_pdev) > 0) {
10213 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
10214 FL("Abort suspend due to pending TX packets"));
10215 return QDF_STATUS_E_AGAIN;
10216 }
Yue Ma245b47b2017-02-21 16:35:31 -080010217
10218 if (soc->intr_mode == DP_INTR_POLL)
10219 qdf_timer_stop(&soc->int_timer);
10220
10221 return QDF_STATUS_SUCCESS;
10222}
10223
10224/**
10225 * dp_runtime_resume() - ensure DP is ready to runtime resume
10226 * @opaque_pdev: DP pdev context
10227 *
10228 * Resume DP for runtime PM.
10229 *
10230 * Return: QDF_STATUS
10231 */
10232static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
10233{
10234 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
10235 struct dp_soc *soc = pdev->soc;
10236 void *hal_srng;
10237 int i;
10238
10239 if (soc->intr_mode == DP_INTR_POLL)
10240 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
10241
10242 for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
10243 hal_srng = soc->tcl_data_ring[i].hal_srng;
10244 if (hal_srng) {
10245 /* We actually only need to acquire the lock */
10246 hal_srng_access_start(soc->hal_soc, hal_srng);
10247 /* Update SRC ring head pointer for HW to send
10248 all pending packets */
10249 hal_srng_access_end(soc->hal_soc, hal_srng);
10250 }
10251 }
10252
10253 return QDF_STATUS_SUCCESS;
10254}
10255#endif /* FEATURE_RUNTIME_PM */
10256
/**
 * dp_tx_get_success_ack_stats() - get tx success completion count
 * @pdev: dp pdev context
 * @vdev_id: vdev identifier
 *
 * Aggregates the vdev's stats into a temporary buffer (under the soc
 * peer_ref_mutex) and returns the successful-TX frame count.
 *
 * Return: tx success ack count; 0 on invalid vdev id or alloc failure
 */
static uint32_t dp_tx_get_success_ack_stats(struct cdp_pdev *pdev,
					    uint8_t vdev_id)
{
	struct dp_vdev *vdev =
		(struct dp_vdev *)dp_get_vdev_from_vdev_id_wifi3(pdev,
								 vdev_id);
	struct dp_soc *soc = ((struct dp_pdev *)pdev)->soc;
	struct cdp_vdev_stats *vdev_stats = NULL;
	uint32_t tx_success;

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid vdev id %d"), vdev_id);
		return 0;
	}

	/* atomic alloc variant - presumably callable from non-sleeping
	 * contexts; confirm against callers
	 */
	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
	if (!vdev_stats) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to get alloc vdev stats");
		return 0;
	}

	/* hold peer_ref_mutex while per-peer stats are aggregated */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	dp_aggregate_vdev_stats(vdev, vdev_stats);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	tx_success = vdev_stats->tx.tx_success.num;
	qdf_mem_free(vdev_stats);

	return tx_success;
}
10296
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +053010297#ifndef CONFIG_WIN
/* Miscellaneous ops (MCL builds): TDLS TX, runtime PM hooks, pktlog
 * bring-up and TX-ack stats query.
 */
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.pkt_log_init = dp_pkt_log_init,
	.pkt_log_con_service = dp_pkt_log_con_service,
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
};
10312
/* TX flow-control ops (LL flow control V2): descriptor pool map/unmap,
 * pause-callback registration and pool-state introspection.
 */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};
10323
/* Legacy (HL) flow-control ops - not implemented for WIFI 3.0 DP */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
10327
Yun Parkfde6b9e2017-06-26 17:13:11 -070010328#ifdef IPA_OFFLOAD
/* IPA offload ops: resource/doorbell setup, pipe and interface
 * setup/teardown, autonomy control, stats and intra-BSS forwarding.
 */
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level,
	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
};
Yun Parkfde6b9e2017-06-26 17:13:11 -070010347#endif
Leo Chang5ea93a42016-11-03 12:39:49 -070010348
Dustin Brown9ae22322019-01-25 09:51:47 -080010349static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
10350{
10351 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
10352 struct dp_soc *soc = pdev->soc;
10353 int timeout = SUSPEND_DRAIN_WAIT;
10354 int drain_wait_delay = 50; /* 50 ms */
10355
10356 /* Abort if there are any pending TX packets */
10357 while (dp_get_tx_pending(opaque_pdev) > 0) {
10358 qdf_sleep(drain_wait_delay);
10359 if (timeout <= 0) {
10360 dp_err("TX frames are pending, abort suspend");
10361 return QDF_STATUS_E_TIMEOUT;
10362 }
10363 timeout = timeout - drain_wait_delay;
10364 }
10365
10366 if (soc->intr_mode == DP_INTR_POLL)
10367 qdf_timer_stop(&soc->int_timer);
10368
10369 return QDF_STATUS_SUCCESS;
10370}
10371
10372static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
10373{
10374 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
10375 struct dp_soc *soc = pdev->soc;
10376
10377 if (soc->intr_mode == DP_INTR_POLL)
10378 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
10379
10380 return QDF_STATUS_SUCCESS;
10381}
10382
/* Bus power-management ops */
static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume
};
10387
/* OCB (802.11p outside-context-of-BSS) ops - not implemented */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
10391
10392
/* Thermal-throttle ops - not implemented */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
10396
/* Mobile (MCL) stats ops - not implemented */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
10400
/* Config ops - not implemented */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
10404
/*
 * dp_peer_get_ref_find_by_addr - get peer with addr by ref count inc
 * @dev: physical device instance (cdp pdev handle)
 * @peer_mac_addr: peer mac address
 * @local_id: out-param filled with the peer's local id
 * @debug_id: to track enum peer access (unused in this variant)
 *
 * NOTE(review): presumably dp_peer_find_hash_find() takes a reference
 * that the caller must drop via dp_peer_release_ref() - confirm.
 *
 * Return: peer instance pointer, or NULL if no match
 */
static inline void *
dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
			     uint8_t *local_id,
			     enum peer_debug_id_type debug_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_peer *peer;

	/* Search spans all vdevs of this pdev's soc (DP_VDEV_ALL) */
	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);

	if (!peer)
		return NULL;

	*local_id = peer->local_id;
	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);

	return peer;
}
10432
/*
 * dp_peer_release_ref - release peer ref count
 * @peer: peer handle
 * @debug_id: to track enum peer access (unused in this variant)
 *
 * Counterpart of dp_peer_get_ref_find_by_addr().
 *
 * Return: None
 */
static inline
void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
{
	dp_peer_unref_delete(peer);
}
10445
/* Peer ops (MCL builds): registration, lookup by MAC/local id,
 * reference management and peer-state updates.
 */
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_by_addr = dp_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
	.peer_release_ref = dp_peer_release_ref,
	.local_peer_id = dp_local_peer_id,
	.peer_find_by_local_id = dp_peer_find_by_local_id,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_vdev_for_peer = dp_get_vdev_for_peer,
	.get_peer_state = dp_get_peer_state,
};
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +053010462#endif
Leo Chang5ea93a42016-11-03 12:39:49 -070010463
/* Top-level CDP dispatch table registered with the converged driver;
 * MCL-only sub-tables are compiled in under !CONFIG_WIN.
 */
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
10490
10491/*
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +053010492 * dp_soc_set_txrx_ring_map()
10493 * @dp_soc: DP handler for soc
10494 *
10495 * Return: Void
10496 */
10497static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
10498{
10499 uint32_t i;
10500 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
Aniruddha Paulc34164e2018-09-14 14:25:30 +053010501 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +053010502 }
10503}
10504
Basamma Yakkanahallib85768e2019-04-27 05:24:00 +053010505#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018)
Anish Nataraje9d4c3b2018-11-24 22:24:56 +053010506
10507#ifndef QCA_MEM_ATTACH_ON_WIFI3
10508
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * !QCA_MEM_ATTACH_ON_WIFI3 variant: attaches and immediately
 * initializes the SOC in one step.
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);
	if (!dp_soc)
		return NULL;

	/* NOTE(review): the soc allocated by dp_soc_attach() appears to
	 * be leaked when dp_soc_init() fails - confirm whether a detach/
	 * free is required on this path.
	 */
	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
		return NULL;

	return (void *)dp_soc;
}
10536#else
10537
/**
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle (unused in this variant)
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload Operations
 * @device_id: Device ID
 *
 * QCA_MEM_ATTACH_ON_WIFI3 variant: only attaches; initialization is
 * deferred (see dp_soc_init_wifi3()).
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
			  HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			  struct ol_if_ops *ol_ops, uint16_t device_id)
{
	struct dp_soc *dp_soc = NULL;

	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
			       ol_ops, device_id);
	return (void *)dp_soc;
}
10559
10560#endif
10561
10562/**
10563 * dp_soc_attach() - Attach txrx SOC
10564 * @ctrl_psoc: Opaque SOC handle from control plane
10565 * @htc_handle: Opaque HTC handle
10566 * @qdf_osdev: QDF device
10567 * @ol_ops: Offload Operations
10568 * @device_id: Device ID
10569 *
10570 * Return: DP SOC handle on success, NULL on failure
10571 */
10572static struct dp_soc *
10573dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10574 struct ol_if_ops *ol_ops, uint16_t device_id)
10575{
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -070010576 int int_ctx;
Anish Nataraje9d4c3b2018-11-24 22:24:56 +053010577 struct dp_soc *soc = NULL;
10578 struct htt_soc *htt_soc = NULL;
10579
10580 soc = qdf_mem_malloc(sizeof(*soc));
Leo Chang5ea93a42016-11-03 12:39:49 -070010581
10582 if (!soc) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +053010583 dp_err("DP SOC memory allocation failed");
Leo Chang5ea93a42016-11-03 12:39:49 -070010584 goto fail0;
10585 }
10586
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -070010587 int_ctx = 0;
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -070010588 soc->device_id = device_id;
Leo Chang5ea93a42016-11-03 12:39:49 -070010589 soc->cdp_soc.ops = &dp_txrx_ops;
10590 soc->cdp_soc.ol_ops = ol_ops;
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +053010591 soc->ctrl_psoc = ctrl_psoc;
Leo Chang5ea93a42016-11-03 12:39:49 -070010592 soc->osdev = qdf_osdev;
Ruchi, Agrawalfea1a842018-08-29 12:14:41 +053010593 soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
10594
Vivek126db5d2018-07-25 22:05:04 +053010595 soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
Leo Chang5ea93a42016-11-03 12:39:49 -070010596 if (!soc->wlan_cfg_ctx) {
Anish Nataraje9d4c3b2018-11-24 22:24:56 +053010597 dp_err("wlan_cfg_ctx failed\n");
10598 goto fail1;
Leo Chang5ea93a42016-11-03 12:39:49 -070010599 }
Anish Nataraje9d4c3b2018-11-24 22:24:56 +053010600 htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
10601 if (!htt_soc) {
10602 dp_err("HTT attach failed");
10603 goto fail1;
10604 }
10605 soc->htt_handle = htt_soc;
10606 htt_soc->dp_soc = soc;
10607 htt_soc->htc_soc = htc_handle;
10608
10609 if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
10610 goto fail2;
10611
10612 return (void *)soc;
10613fail2:
10614 qdf_mem_free(htt_soc);
10615fail1:
10616 qdf_mem_free(soc);
10617fail0:
10618 return NULL;
10619}
10620
/**
 * dp_soc_init() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle (from dp_soc_attach())
 * @htc_handle: Opaque HTC handle
 * @hif_handle: Opaque HIF handle
 *
 * Binds the HAL/HIF handles, applies per-target configuration,
 * reads control-plane config overrides and creates soc-level locks,
 * the REO descriptor freelist and the HTT stats work queue.
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
{
	int target_type;
	struct dp_soc *soc = (struct dp_soc *)dpsoc;
	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;

	htt_soc->htc_soc = htc_handle;
	soc->hif_handle = hif_handle;

	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
	if (!soc->hal_soc)
		return NULL;

	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
			   soc->hal_soc, soc->osdev);
	/* Per-target tuning: REO ring sizes, AST/DA-war/NAC capabilities */
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		break;
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->ast_override_support = 1;
		/* Monitor mode: RX is delivered via monitor rings only,
		 * so the normal RX/rxdma2host interrupt masks are cleared
		 */
		if (con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
			int int_ctx;

			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
			}
		}
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
#endif
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->hw_nac_monitor_support = 1;
		soc->da_war_enabled = true;
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA8074);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->hw_nac_monitor_support = 1;
		soc->ast_override_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		soc->da_war_enabled = false;
		break;
	default:
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
	soc->cce_disable = false;

	/* Control-plane overrides: max peer id and CCE disable */
	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL) {
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
		}

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_CCE_DISABLE);
		if (ret == 1)
			soc->cce_disable = true;
	}

	qdf_spinlock_create(&soc->peer_ref_mutex);
	qdf_spinlock_create(&soc->ast_lock);

	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	/* fill the tx/rx cpu ring map*/
	dp_soc_set_txrx_ring_map(soc);

	qdf_spinlock_create(&soc->htt_stats.lock);
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return soc;

}
Anish Nataraje9d4c3b2018-11-24 22:24:56 +053010727
/**
 * dp_soc_init_wifi3() - Initialize txrx SOC
 * @dpsoc: Opaque DP SOC handle
 * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device (Unused)
 * @ol_ops: Offload Operations (Unused)
 * @device_id: Device ID (Unused)
 *
 * Thin wrapper over dp_soc_init(); unused parameters exist only to
 * match the attach-time signature.
 *
 * Return: DP SOC handle on success, NULL on failure
 */
void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			struct ol_if_ops *ol_ops, uint16_t device_id)
{
	return dp_soc_init(dpsoc, htc_handle, hif_handle);
}
10746
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -070010747#endif
Keyur Parekhfad6d082017-05-07 08:54:47 -070010748
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -080010749/*
10750 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
10751 *
10752 * @soc: handle to DP soc
10753 * @mac_id: MAC id
10754 *
10755 * Return: Return pdev corresponding to MAC
10756 */
10757void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
10758{
10759 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
10760 return soc->pdev_list[mac_id];
10761
10762 /* Typically for MCL as there only 1 PDEV*/
10763 return soc->pdev_list[0];
10764}
10765
10766/*
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010767 * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
10768 * @soc: DP SoC context
10769 * @max_mac_rings: No of MAC rings
10770 *
10771 * Return: None
10772 */
10773static
10774void dp_is_hw_dbs_enable(struct dp_soc *soc,
10775 int *max_mac_rings)
10776{
10777 bool dbs_enable = false;
10778 if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
10779 dbs_enable = soc->cdp_soc.ol_ops->
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +053010780 is_hw_dbs_2x2_capable(soc->ctrl_psoc);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010781
10782 *max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
10783}
10784
/*
 * dp_is_soc_reinit() - Check if soc reinit is true
 * @soc: DP SoC context
 *
 * Return: true or false
 */
bool dp_is_soc_reinit(struct dp_soc *soc)
{
	return soc->dp_soc_reinit;
}
10795
10796/*
Keyur Parekhfad6d082017-05-07 08:54:47 -070010797* dp_set_pktlog_wifi3() - attach txrx vdev
10798* @pdev: Datapath PDEV handle
10799* @event: which event's notifications are being subscribed to
10800* @enable: WDI event subscribe or not. (True or False)
10801*
10802* Return: Success, NULL on failure
10803*/
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010804#ifdef WDI_EVENT_ENABLE
Keyur Parekhfad6d082017-05-07 08:54:47 -070010805int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
Mainak Sen2e43fb22019-02-21 14:03:24 +053010806 bool enable)
Keyur Parekhfad6d082017-05-07 08:54:47 -070010807{
Anish Nataraje9d4c3b2018-11-24 22:24:56 +053010808 struct dp_soc *soc = NULL;
Keyur Parekhfad6d082017-05-07 08:54:47 -070010809 struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010810 int max_mac_rings = wlan_cfg_get_num_mac_rings
10811 (pdev->wlan_cfg_ctx);
10812 uint8_t mac_id = 0;
10813
Anish Nataraje9d4c3b2018-11-24 22:24:56 +053010814 soc = pdev->soc;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010815 dp_is_hw_dbs_enable(soc, &max_mac_rings);
10816
10817 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
Aditya Sathishded018e2018-07-02 16:25:21 +053010818 FL("Max_mac_rings %d "),
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010819 max_mac_rings);
Keyur Parekhfad6d082017-05-07 08:54:47 -070010820
10821 if (enable) {
10822 switch (event) {
10823 case WDI_EVENT_RX_DESC:
10824 if (pdev->monitor_vdev) {
10825 /* Nothing needs to be done if monitor mode is
10826 * enabled
10827 */
10828 return 0;
10829 }
10830 if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
10831 pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
10832 htt_tlv_filter.mpdu_start = 1;
10833 htt_tlv_filter.msdu_start = 1;
10834 htt_tlv_filter.msdu_end = 1;
10835 htt_tlv_filter.mpdu_end = 1;
10836 htt_tlv_filter.packet_header = 1;
10837 htt_tlv_filter.attention = 1;
10838 htt_tlv_filter.ppdu_start = 1;
10839 htt_tlv_filter.ppdu_end = 1;
10840 htt_tlv_filter.ppdu_end_user_stats = 1;
10841 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
10842 htt_tlv_filter.ppdu_end_status_done = 1;
10843 htt_tlv_filter.enable_fp = 1;
nobeljd124b742017-10-16 11:59:12 -070010844 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
10845 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
10846 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
10847 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
10848 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
10849 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Kiran Venkatappa07921612019-03-02 23:14:12 +053010850 htt_tlv_filter.offset_valid = false;
Keyur Parekhfad6d082017-05-07 08:54:47 -070010851
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010852 for (mac_id = 0; mac_id < max_mac_rings;
10853 mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -080010854 int mac_for_pdev =
10855 dp_get_mac_id_for_pdev(mac_id,
10856 pdev->pdev_id);
10857
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010858 htt_h2t_rx_ring_cfg(soc->htt_handle,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -080010859 mac_for_pdev,
10860 pdev->rxdma_mon_status_ring[mac_id]
10861 .hal_srng,
10862 RXDMA_MONITOR_STATUS,
10863 RX_BUFFER_SIZE,
10864 &htt_tlv_filter);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010865
10866 }
10867
10868 if (soc->reap_timer_init)
10869 qdf_timer_mod(&soc->mon_reap_timer,
10870 DP_INTR_POLL_TIMER_MS);
Keyur Parekhfad6d082017-05-07 08:54:47 -070010871 }
10872 break;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010873
Keyur Parekhfad6d082017-05-07 08:54:47 -070010874 case WDI_EVENT_LITE_RX:
10875 if (pdev->monitor_vdev) {
10876 /* Nothing needs to be done if monitor mode is
10877 * enabled
10878 */
10879 return 0;
10880 }
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010881
Keyur Parekhfad6d082017-05-07 08:54:47 -070010882 if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
10883 pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010884
Keyur Parekhfad6d082017-05-07 08:54:47 -070010885 htt_tlv_filter.ppdu_start = 1;
10886 htt_tlv_filter.ppdu_end = 1;
10887 htt_tlv_filter.ppdu_end_user_stats = 1;
10888 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
10889 htt_tlv_filter.ppdu_end_status_done = 1;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010890 htt_tlv_filter.mpdu_start = 1;
Keyur Parekhfad6d082017-05-07 08:54:47 -070010891 htt_tlv_filter.enable_fp = 1;
nobeljd124b742017-10-16 11:59:12 -070010892 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
10893 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
10894 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
10895 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
10896 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
10897 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Kiran Venkatappa07921612019-03-02 23:14:12 +053010898 htt_tlv_filter.offset_valid = false;
Keyur Parekhfad6d082017-05-07 08:54:47 -070010899
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010900 for (mac_id = 0; mac_id < max_mac_rings;
10901 mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -080010902 int mac_for_pdev =
10903 dp_get_mac_id_for_pdev(mac_id,
10904 pdev->pdev_id);
10905
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010906 htt_h2t_rx_ring_cfg(soc->htt_handle,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -080010907 mac_for_pdev,
10908 pdev->rxdma_mon_status_ring[mac_id]
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010909 .hal_srng,
Keyur Parekhfad6d082017-05-07 08:54:47 -070010910 RXDMA_MONITOR_STATUS,
10911 RX_BUFFER_SIZE_PKTLOG_LITE,
10912 &htt_tlv_filter);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010913 }
10914
10915 if (soc->reap_timer_init)
10916 qdf_timer_mod(&soc->mon_reap_timer,
10917 DP_INTR_POLL_TIMER_MS);
Keyur Parekhfad6d082017-05-07 08:54:47 -070010918 }
10919 break;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010920
Keyur Parekhdb0fa142017-07-13 19:40:22 -070010921 case WDI_EVENT_LITE_T2H:
10922 if (pdev->monitor_vdev) {
10923 /* Nothing needs to be done if monitor mode is
10924 * enabled
10925 */
10926 return 0;
10927 }
Venkata Sharath Chandra Manchala0ad4fda2018-03-01 13:45:46 -080010928
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010929 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -080010930 int mac_for_pdev = dp_get_mac_id_for_pdev(
10931 mac_id, pdev->pdev_id);
10932
Soumya Bhat0d6245c2018-02-08 21:02:57 +053010933 pdev->pktlog_ppdu_stats = true;
Venkata Sharath Chandra Manchala0ad4fda2018-03-01 13:45:46 -080010934 dp_h2t_cfg_stats_msg_send(pdev,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -080010935 DP_PPDU_TXLITE_STATS_BITMASK_CFG,
10936 mac_for_pdev);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010937 }
Keyur Parekhdb0fa142017-07-13 19:40:22 -070010938 break;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010939
Keyur Parekhfad6d082017-05-07 08:54:47 -070010940 default:
10941 /* Nothing needs to be done for other pktlog types */
10942 break;
10943 }
10944 } else {
10945 switch (event) {
10946 case WDI_EVENT_RX_DESC:
10947 case WDI_EVENT_LITE_RX:
10948 if (pdev->monitor_vdev) {
10949 /* Nothing needs to be done if monitor mode is
10950 * enabled
10951 */
10952 return 0;
10953 }
10954 if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
10955 pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010956
10957 for (mac_id = 0; mac_id < max_mac_rings;
10958 mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -080010959 int mac_for_pdev =
10960 dp_get_mac_id_for_pdev(mac_id,
10961 pdev->pdev_id);
10962
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010963 htt_h2t_rx_ring_cfg(soc->htt_handle,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -080010964 mac_for_pdev,
10965 pdev->rxdma_mon_status_ring[mac_id]
10966 .hal_srng,
10967 RXDMA_MONITOR_STATUS,
10968 RX_BUFFER_SIZE,
10969 &htt_tlv_filter);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010970 }
10971
10972 if (soc->reap_timer_init)
10973 qdf_timer_stop(&soc->mon_reap_timer);
Keyur Parekhfad6d082017-05-07 08:54:47 -070010974 }
10975 break;
Keyur Parekhdb0fa142017-07-13 19:40:22 -070010976 case WDI_EVENT_LITE_T2H:
10977 if (pdev->monitor_vdev) {
10978 /* Nothing needs to be done if monitor mode is
10979 * enabled
10980 */
10981 return 0;
10982 }
10983 /* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
10984 * passing value 0. Once these macros will define in htt
10985 * header file will use proper macros
10986 */
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070010987 for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -080010988 int mac_for_pdev =
10989 dp_get_mac_id_for_pdev(mac_id,
10990 pdev->pdev_id);
10991
Soumya Bhat0d6245c2018-02-08 21:02:57 +053010992 pdev->pktlog_ppdu_stats = false;
10993 if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
10994 dp_h2t_cfg_stats_msg_send(pdev, 0,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -080010995 mac_for_pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +053010996 } else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
10997 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -080010998 mac_for_pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +053010999 } else if (pdev->enhanced_stats_en) {
11000 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
Manjunathappa Prakashd9ce3502018-02-05 14:09:17 -080011001 mac_for_pdev);
Soumya Bhat0d6245c2018-02-08 21:02:57 +053011002 }
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070011003 }
11004
Keyur Parekhdb0fa142017-07-13 19:40:22 -070011005 break;
Keyur Parekhfad6d082017-05-07 08:54:47 -070011006 default:
11007 /* Nothing needs to be done for other pktlog types */
11008 break;
11009 }
11010 }
11011 return 0;
11012}
11013#endif
Varsha Mishraa331e6e2019-03-11 12:16:14 +053011014
11015/**
11016 * dp_bucket_index() - Return index from array
11017 *
11018 * @delay: delay measured
11019 * @array: array used to index corresponding delay
11020 *
11021 * Return: index
11022 */
11023static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
11024{
Varsha Mishra3e9d6472019-03-14 17:56:58 +053011025 uint8_t i = CDP_DELAY_BUCKET_0;
Varsha Mishraa331e6e2019-03-11 12:16:14 +053011026
11027 for (; i < CDP_DELAY_BUCKET_MAX; i++) {
Varsha Mishra3e9d6472019-03-14 17:56:58 +053011028 if (delay >= array[i] && delay <= array[i + 1])
Varsha Mishraa331e6e2019-03-11 12:16:14 +053011029 return i;
11030 }
11031
11032 return (CDP_DELAY_BUCKET_MAX - 1);
11033}
11034
11035/**
11036 * dp_fill_delay_buckets() - Fill delay statistics bucket for each
11037 * type of delay
11038 *
11039 * @pdev: pdev handle
11040 * @delay: delay in ms
11041 * @t: tid value
11042 * @mode: type of tx delay mode
11043 * Return: pointer to cdp_delay_stats structure
11044 */
11045static struct cdp_delay_stats *
11046dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
11047 uint8_t tid, uint8_t mode)
11048{
11049 uint8_t delay_index = 0;
11050 struct cdp_tid_tx_stats *tstats =
11051 &pdev->stats.tid_stats.tid_tx_stats[tid];
11052 struct cdp_tid_rx_stats *rstats =
11053 &pdev->stats.tid_stats.tid_rx_stats[tid];
11054 /*
11055 * cdp_fw_to_hw_delay_range
11056 * Fw to hw delay ranges in milliseconds
11057 */
11058 uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
Varsha Mishra3e9d6472019-03-14 17:56:58 +053011059 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
Varsha Mishraa331e6e2019-03-11 12:16:14 +053011060
11061 /*
11062 * cdp_sw_enq_delay_range
11063 * Software enqueue delay ranges in milliseconds
11064 */
11065 uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
Varsha Mishra3e9d6472019-03-14 17:56:58 +053011066 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
Varsha Mishraa331e6e2019-03-11 12:16:14 +053011067
11068 /*
11069 * cdp_intfrm_delay_range
11070 * Interframe delay ranges in milliseconds
11071 */
11072 uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
Varsha Mishra3e9d6472019-03-14 17:56:58 +053011073 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
Varsha Mishraa331e6e2019-03-11 12:16:14 +053011074
11075 /*
11076 * Update delay stats in proper bucket
11077 */
11078 switch (mode) {
11079 /* Software Enqueue delay ranges */
11080 case CDP_DELAY_STATS_SW_ENQ:
11081
11082 delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
11083 tstats->swq_delay.delay_bucket[delay_index]++;
11084 return &tstats->swq_delay;
11085
11086 /* Tx Completion delay ranges */
11087 case CDP_DELAY_STATS_FW_HW_TRANSMIT:
11088
11089 delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
11090 tstats->hwtx_delay.delay_bucket[delay_index]++;
11091 return &tstats->hwtx_delay;
11092
11093 /* Interframe tx delay ranges */
11094 case CDP_DELAY_STATS_TX_INTERFRAME:
11095
11096 delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11097 tstats->intfrm_delay.delay_bucket[delay_index]++;
11098 return &tstats->intfrm_delay;
11099
11100 /* Interframe rx delay ranges */
11101 case CDP_DELAY_STATS_RX_INTERFRAME:
11102
11103 delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11104 rstats->intfrm_delay.delay_bucket[delay_index]++;
11105 return &rstats->intfrm_delay;
11106
11107 /* Ring reap to indication to network stack */
11108 case CDP_DELAY_STATS_REAP_STACK:
11109
11110 delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11111 rstats->to_stack_delay.delay_bucket[delay_index]++;
Varsha Mishra3e9d6472019-03-14 17:56:58 +053011112 return &rstats->to_stack_delay;
Varsha Mishraa331e6e2019-03-11 12:16:14 +053011113 default:
11114 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
11115 "%s Incorrect delay mode: %d", __func__, mode);
11116 }
11117
11118 return NULL;
11119}
11120
11121/**
11122 * dp_update_delay_stats() - Update delay statistics in structure
11123 * and fill min, max and avg delay
11124 *
11125 * @pdev: pdev handle
11126 * @delay: delay in ms
11127 * @tid: tid value
11128 * @mode: type of tx delay mode
11129 * Return: none
11130 */
11131void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
11132 uint8_t tid, uint8_t mode)
11133{
11134 struct cdp_delay_stats *dstats = NULL;
11135
11136 /*
11137 * Delay ranges are different for different delay modes
11138 * Get the correct index to update delay bucket
11139 */
11140 dstats = dp_fill_delay_buckets(pdev, delay, tid, mode);
11141 if (qdf_unlikely(!dstats))
11142 return;
11143
11144 if (delay != 0) {
11145 /*
11146 * Compute minimum,average and maximum
11147 * delay
11148 */
11149 if (delay < dstats->min_delay)
11150 dstats->min_delay = delay;
11151
11152 if (delay > dstats->max_delay)
11153 dstats->max_delay = delay;
11154
11155 /*
11156 * Average over delay measured till now
11157 */
11158 if (!dstats->avg_delay)
11159 dstats->avg_delay = delay;
11160 else
11161 dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
11162 }
11163}