blob: dbe96a1080ee41a17774ce61f31b9aaca466b34e [file] [log] [blame]
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001/*
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05302 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <qdf_types.h>
20#include <qdf_lock.h>
Dhanashri Atre14049172016-11-11 18:32:36 -080021#include <qdf_net_types.h>
Dhanashri Atre0da31222017-03-23 12:30:58 -070022#include <qdf_lro.h>
Tallapragada Kalyan4c183b82017-09-13 23:48:14 +053023#include <qdf_module.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070024#include <hal_api.h>
25#include <hif.h>
26#include <htt.h>
27#include <wdi_event.h>
28#include <queue.h>
29#include "dp_htt.h"
30#include "dp_types.h"
31#include "dp_internal.h"
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +053032#include "dp_tx.h"
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070033#include "dp_tx_desc.h"
Leo Chang5ea93a42016-11-03 12:39:49 -070034#include "dp_rx.h"
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080035#include <cdp_txrx_handle.h>
Ravi Joshiaf9ace82017-02-17 12:41:48 -080036#include <wlan_cfg.h>
Ishank Jainbc2d91f2017-01-03 18:14:54 +053037#include "cdp_txrx_cmn_struct.h"
Prathyusha Guduri184b6402018-02-04 23:01:49 +053038#include "cdp_txrx_stats_struct.h"
Dhanashri Atre14049172016-11-11 18:32:36 -080039#include <qdf_util.h>
Ishank Jain1e7401c2017-02-17 15:38:39 +053040#include "dp_peer.h"
Kai Chen6eca1a62017-01-12 10:17:53 -080041#include "dp_rx_mon.h"
Ishank Jain6290a3c2017-03-21 10:49:39 +053042#include "htt_stats.h"
Keyur Parekhfad6d082017-05-07 08:54:47 -070043#include "qdf_mem.h" /* qdf_mem_malloc,free */
Manjunathappa Prakash5f050a82017-07-18 22:00:05 -070044#ifdef QCA_LL_TX_FLOW_CONTROL_V2
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070045#include "cdp_txrx_flow_ctrl_v2.h"
Manjunathappa Prakash5f050a82017-07-18 22:00:05 -070046#else
47static inline void
48cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
49{
50 return;
51}
52#endif
Yun Parkfde6b9e2017-06-26 17:13:11 -070053#include "dp_ipa.h"
Ravi Joshiaf9ace82017-02-17 12:41:48 -080054
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070055#ifdef CONFIG_MCL
56static void dp_service_mon_rings(void *arg);
57#ifndef REMOVE_PKT_LOG
58#include <pktlog_ac_api.h>
59#include <pktlog_ac.h>
60static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn);
61#endif
62#endif
63static void dp_pktlogmod_exit(struct dp_pdev *handle);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +053064static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
65 uint8_t *peer_mac_addr);
66static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -070067
Karunakar Dasineni1d891ed2017-03-29 15:42:02 -070068#define DP_INTR_POLL_TIMER_MS 10
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +053069#define DP_WDS_AGING_TIMER_DEFAULT_MS 120000
Ishank Jainbc2d91f2017-01-03 18:14:54 +053070#define DP_MCS_LENGTH (6*MAX_MCS)
71#define DP_NSS_LENGTH (6*SS_COUNT)
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +053072#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX)
73#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX)
74#define DP_MAX_MCS_STRING_LEN 30
Ishank Jain6290a3c2017-03-21 10:49:39 +053075#define DP_CURR_FW_STATS_AVAIL 19
76#define DP_HTT_DBG_EXT_STATS_MAX 256
Ishank Jain949674c2017-02-27 17:09:29 +053077
Yun Parkfde6b9e2017-06-26 17:13:11 -070078#ifdef IPA_OFFLOAD
79/* Exclude IPA rings from the interrupt context */
Yun Park601d0d82017-08-28 21:49:31 -070080#define TX_RING_MASK_VAL 0xb
Yun Parkfde6b9e2017-06-26 17:13:11 -070081#define RX_RING_MASK_VAL 0x7
82#else
83#define TX_RING_MASK_VAL 0xF
84#define RX_RING_MASK_VAL 0xF
85#endif
Venkateswara Swamy Bandarued15e74a2017-08-18 19:13:10 +053086
/* Module parameter: presumably enables RX hash based REO ring selection -
 * NOTE(review): confirm exact semantics against where rx_hash is consumed.
 */
bool rx_hash = 1;
qdf_declare_param(rx_hash, bool);
89
sumedh baikady72b1c712017-08-24 12:11:46 -070090#define STR_MAXLEN 64
Soumya Bhat89647ef2017-11-16 17:23:48 +053091
Soumya Bhat0d6245c2018-02-08 21:02:57 +053092#define DP_PPDU_STATS_CFG_ALL 0xFFFF
93
94/* PPDU stats mask sent to FW to enable enhanced stats */
95#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67
96/* PPDU stats mask sent to FW to support debug sniffer feature */
97#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF
Ishank Jain949674c2017-02-27 17:09:29 +053098/**
99 * default_dscp_tid_map - Default DSCP-TID mapping
100 *
101 * DSCP TID AC
102 * 000000 0 WME_AC_BE
103 * 001000 1 WME_AC_BK
104 * 010000 1 WME_AC_BK
105 * 011000 0 WME_AC_BE
106 * 100000 5 WME_AC_VI
107 * 101000 5 WME_AC_VI
108 * 110000 6 WME_AC_VO
109 * 111000 6 WME_AC_VO
110 */
/* Lookup table indexed by DSCP value; each group of 8 DSCP codepoints
 * maps to one TID (see the mapping summary in the comment above).
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,		/* DSCP  0- 7 -> TID 0 (BE) */
	1, 1, 1, 1, 1, 1, 1, 1,		/* DSCP  8-15 -> TID 1 (BK) */
	1, 1, 1, 1, 1, 1, 1, 1,		/* DSCP 16-23 -> TID 1 (BK) */
	0, 0, 0, 0, 0, 0, 0, 0,		/* DSCP 24-31 -> TID 0 (BE) */
	5, 5, 5, 5, 5, 5, 5, 5,		/* DSCP 32-39 -> TID 5 (VI) */
	5, 5, 5, 5, 5, 5, 5, 5,		/* DSCP 40-47 -> TID 5 (VI) */
	6, 6, 6, 6, 6, 6, 6, 6,		/* DSCP 48-55 -> TID 6 (VO) */
	6, 6, 6, 6, 6, 6, 6, 6,		/* DSCP 56-63 -> TID 6 (VO) */
};
121
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530122/*
123 * struct dp_rate_debug
124 *
125 * @mcs_type: print string for a given mcs
126 * @valid: valid mcs rate?
127 */
struct dp_rate_debug {
	char mcs_type[DP_MAX_MCS_STRING_LEN];	/* printable name for the MCS */
	uint8_t valid;				/* MCS_VALID / MCS_INVALID */
};

/* validity markers for dp_rate_string entries */
#define MCS_VALID 1
#define MCS_INVALID 0
135
/* Per-preamble (DOT11_MAX rows) x per-MCS print strings.
 * NOTE(review): the final "INVALID" entry of several rows is marked
 * MCS_VALID - presumably intentional as a catch-all slot; confirm.
 */
static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = {

	/* OFDM (11a/g) rates */
	{
		{"OFDM 48 Mbps", MCS_VALID},
		{"OFDM 24 Mbps", MCS_VALID},
		{"OFDM 12 Mbps", MCS_VALID},
		{"OFDM 6 Mbps ", MCS_VALID},
		{"OFDM 54 Mbps", MCS_VALID},
		{"OFDM 36 Mbps", MCS_VALID},
		{"OFDM 18 Mbps", MCS_VALID},
		{"OFDM 9 Mbps ", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	/* CCK (11b) rates, long/short preamble */
	{
		{"CCK 11 Mbps Long  ", MCS_VALID},
		{"CCK 5.5 Mbps Long ", MCS_VALID},
		{"CCK 2 Mbps Long   ", MCS_VALID},
		{"CCK 1 Mbps Long   ", MCS_VALID},
		{"CCK 11 Mbps Short ", MCS_VALID},
		{"CCK 5.5 Mbps Short", MCS_VALID},
		{"CCK 2 Mbps Short  ", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	/* HT (11n) MCS 0-7 */
	{
		{"HT MCS 0 (BPSK 1/2)  ", MCS_VALID},
		{"HT MCS 1 (QPSK 1/2)  ", MCS_VALID},
		{"HT MCS 2 (QPSK 3/4)  ", MCS_VALID},
		{"HT MCS 3 (16-QAM 1/2)", MCS_VALID},
		{"HT MCS 4 (16-QAM 3/4)", MCS_VALID},
		{"HT MCS 5 (64-QAM 2/3)", MCS_VALID},
		{"HT MCS 6 (64-QAM 3/4)", MCS_VALID},
		{"HT MCS 7 (64-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_INVALID},
		{"INVALID ", MCS_VALID},
	},
	/* VHT (11ac) MCS 0-11 */
	{
		{"VHT MCS 0 (BPSK 1/2)     ", MCS_VALID},
		{"VHT MCS 1 (QPSK 1/2)     ", MCS_VALID},
		{"VHT MCS 2 (QPSK 3/4)     ", MCS_VALID},
		{"VHT MCS 3 (16-QAM 1/2)   ", MCS_VALID},
		{"VHT MCS 4 (16-QAM 3/4)   ", MCS_VALID},
		{"VHT MCS 5 (64-QAM 2/3)   ", MCS_VALID},
		{"VHT MCS 6 (64-QAM 3/4)   ", MCS_VALID},
		{"VHT MCS 7 (64-QAM 5/6)   ", MCS_VALID},
		{"VHT MCS 8 (256-QAM 3/4)  ", MCS_VALID},
		{"VHT MCS 9 (256-QAM 5/6)  ", MCS_VALID},
		{"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID},
		{"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_VALID},
	},
	/* HE (11ax) MCS 0-11 */
	{
		{"HE MCS 0 (BPSK 1/2)     ", MCS_VALID},
		{"HE MCS 1 (QPSK 1/2)     ", MCS_VALID},
		{"HE MCS 2 (QPSK 3/4)     ", MCS_VALID},
		{"HE MCS 3 (16-QAM 1/2)   ", MCS_VALID},
		{"HE MCS 4 (16-QAM 3/4)   ", MCS_VALID},
		{"HE MCS 5 (64-QAM 2/3)   ", MCS_VALID},
		{"HE MCS 6 (64-QAM 3/4)   ", MCS_VALID},
		{"HE MCS 7 (64-QAM 5/6)   ", MCS_VALID},
		{"HE MCS 8 (256-QAM 3/4)  ", MCS_VALID},
		{"HE MCS 9 (256-QAM 5/6)  ", MCS_VALID},
		{"HE MCS 10 (1024-QAM 3/4)", MCS_VALID},
		{"HE MCS 11 (1024-QAM 5/6)", MCS_VALID},
		{"INVALID ", MCS_VALID},
	}
};
214
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700215/**
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +0530216 * @brief Cpu ring map types
217 */
enum dp_cpu_ring_map_types {
	DP_DEFAULT_MAP,				/* no radio offloaded to NSS */
	DP_NSS_FIRST_RADIO_OFFLOADED_MAP,	/* radio 0 offloaded */
	DP_NSS_SECOND_RADIO_OFFLOADED_MAP,	/* radio 1 offloaded */
	DP_NSS_ALL_RADIO_OFFLOADED_MAP,		/* all radios offloaded */
	DP_CPU_RING_MAP_MAX			/* table size sentinel */
};
225
226/**
227 * @brief Cpu to tx ring map
228 */
/* Per-map-type TCL/TX ring assignment for each interrupt context
 * (rows indexed by enum dp_cpu_ring_map_types, columns by context).
 */
static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = {
	{0x0, 0x1, 0x2, 0x0},	/* DP_DEFAULT_MAP */
	{0x1, 0x2, 0x1, 0x2},	/* DP_NSS_FIRST_RADIO_OFFLOADED_MAP */
	{0x0, 0x2, 0x0, 0x2},	/* DP_NSS_SECOND_RADIO_OFFLOADED_MAP */
	{0x2, 0x2, 0x2, 0x2}	/* DP_NSS_ALL_RADIO_OFFLOADED_MAP */
};
235
236/**
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800237 * @brief Select the type of statistics
238 */
enum dp_stats_type {
	STATS_FW = 0,		/* firmware (HTT ext) statistics */
	STATS_HOST = 1,		/* host-side statistics */
	STATS_TYPE_MAX = 2,	/* column count of dp_stats_mapping_table */
};
244
245/**
246 * @brief General Firmware statistics options
247 *
248 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID = -1,	/* row has no FW stat (host-only) */
};
252
253/**
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +0530254 * dp_stats_mapping_table - Firmware and Host statistics
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -0800255 * currently supported
256 */
/* Row = user-visible stats id; column 0 = FW (HTT ext) stat id, column 1 =
 * host stat id.  Exactly one column per row is meaningful; the other holds
 * the corresponding *_INVALID marker.
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
};
287
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530288static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
289 struct cdp_peer *peer_hdl,
290 uint8_t *mac_addr,
291 enum cdp_txrx_ast_entry_type type,
292 uint32_t flags)
293{
294
295 return dp_peer_add_ast((struct dp_soc *)soc_hdl,
296 (struct dp_peer *)peer_hdl,
297 mac_addr,
298 type,
299 flags);
300}
301
302static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl,
303 void *ast_entry_hdl)
304{
305 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
306 qdf_spin_lock_bh(&soc->ast_lock);
307 dp_peer_del_ast((struct dp_soc *)soc_hdl,
308 (struct dp_ast_entry *)ast_entry_hdl);
309 qdf_spin_unlock_bh(&soc->ast_lock);
310}
311
312static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
313 struct cdp_peer *peer_hdl,
314 void *ast_entry_hdl,
315 uint32_t flags)
316{
317 int status;
318 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
319 qdf_spin_lock_bh(&soc->ast_lock);
320 status = dp_peer_update_ast(soc,
321 (struct dp_peer *)peer_hdl,
322 (struct dp_ast_entry *)ast_entry_hdl,
323 flags);
324 qdf_spin_unlock_bh(&soc->ast_lock);
325 return status;
326}
327
328static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl,
329 uint8_t *ast_mac_addr)
330{
331 struct dp_ast_entry *ast_entry;
332 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
333 qdf_spin_lock_bh(&soc->ast_lock);
334 ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr);
335 qdf_spin_unlock_bh(&soc->ast_lock);
336 return (void *)ast_entry;
337}
338
339static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl,
340 void *ast_entry_hdl)
341{
342 return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl,
343 (struct dp_ast_entry *)ast_entry_hdl);
344}
345
346static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl,
347 void *ast_entry_hdl)
348{
349 return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl,
350 (struct dp_ast_entry *)ast_entry_hdl);
351}
352
353static void dp_peer_ast_set_type_wifi3(
354 struct cdp_soc_t *soc_hdl,
355 void *ast_entry_hdl,
356 enum cdp_txrx_ast_entry_type type)
357{
358 dp_peer_ast_set_type((struct dp_soc *)soc_hdl,
359 (struct dp_ast_entry *)ast_entry_hdl,
360 type);
361}
362
363
364
Houston Hoffman648a9182017-05-21 23:27:50 -0700365/**
366 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
367 * @ring_num: ring num of the ring being queried
368 * @grp_mask: the grp_mask array for the ring type in question.
369 *
370 * The grp_mask array is indexed by group number and the bit fields correspond
371 * to ring numbers. We are finding which interrupt group a ring belongs to.
372 *
373 * Return: the index in the grp_mask array with the ring number.
374 * -QDF_STATUS_E_NOENT if no entry is found
375 */
376static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
377{
378 int ext_group_num;
379 int mask = 1 << ring_num;
380
381 for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
382 ext_group_num++) {
383 if (mask & grp_mask[ext_group_num])
384 return ext_group_num;
385 }
386
387 return -QDF_STATUS_E_NOENT;
388}
389
/*
 * dp_srng_calculate_msi_group() - pick the wlan_cfg group-mask array for a
 * (ring_type, ring_num) pair and resolve it to the interrupt ext_group
 * servicing that ring.
 *
 * Return: ext_group index on success; -QDF_STATUS_E_NOENT for ring types
 * that are not serviced by a DP interrupt context (SW-to-HW rings, CE
 * rings handled by hif, unsupported monitor-buf low-threshold, etc.)
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
	enum hal_ring_type ring_type,
	int ring_num)
{
	int *grp_mask;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		/* dp_tx_comp_handler - soc->tx_comp_ring */
		if (ring_num < 3)
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
		else if (ring_num == 3) {
			/* sw treats this as a separate ring type */
			grp_mask = &soc->wlan_cfg_ctx->
				int_rx_wbm_rel_ring_mask[0];
			/* rel ring is ring 0 of its own mask */
			ring_num = 0;
		} else {
			qdf_assert(0);
			return -QDF_STATUS_E_NOENT;
		}
		break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
		break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
		break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	/* fallthrough: both monitor rings share the rx_mon mask */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
		break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
		break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
		break;

	case RXDMA_MONITOR_BUF:
		/* TODO: support low_thresh interrupt */
		return -QDF_STATUS_E_NOENT;
		break;

	case TCL_DATA:
	case TCL_CMD:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
		break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
		break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
		break;
	}

	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
476
/*
 * dp_srng_msi_setup() - program MSI address/data into @ring_params for the
 * ext_group servicing this ring, using the platform "DP" MSI assignment.
 * Leaves msi_addr/msi_data zero (legacy interrupts) when the ring has no
 * ext_group or no MSI assignment exists.
 */
static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
	*ring_params, int ring_type, int ring_num)
{
	int msi_group_number;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* no MSI assignment for DP on this platform - keep legacy mode */
	if (ret)
		return;

	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
						       ring_num);
	if (msi_group_number < 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
			ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	/*
	 * NOTE(review): vectors wrap via '%' below, so sharing already
	 * begins at msi_group_number == msi_data_count; '>' looks like an
	 * off-by-one - confirm whether '>=' was intended here.
	 */
	if (msi_group_number > msi_data_count) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			FL("2 msi_groups will share an msi; msi_group_num %d"),
			msi_group_number);

		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	/* assemble the 64-bit MSI target address from low/high words */
	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
519
520/**
Pamidipati, Vijay899e7752017-07-25 22:09:28 +0530521 * dp_print_ast_stats() - Dump AST table contents
522 * @soc: Datapath soc handle
523 *
524 * return void
525 */
526#ifdef FEATURE_WDS
/*
 * dp_print_ast_stats() - dump AST counters and then every AST entry,
 * walking pdev -> vdev -> peer -> AST entry lists.
 *
 * NOTE(review): the walk takes no soc->ast_lock and no peer references -
 * presumably callers run in a context where the lists are stable; confirm.
 */
static void dp_print_ast_stats(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_entries = 0;	/* running count printed per entry */
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *tmp_ase;

	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS("	Entries Added = %d", soc->stats.ast.added);
	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("AST Table:");
	/* pdev_list is densely packed; stop at the first NULL slot */
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
					DP_PRINT_STATS("%6d mac_addr = %pM"
							" peer_mac_addr = %pM"
							" type = %d"
							" next_hop = %d"
							" is_active = %d"
							" is_bss = %d"
							" ast_idx = %d"
							" pdev_id = %d"
							" vdev_id = %d",
							++num_entries,
							ase->mac_addr.raw,
							ase->peer->mac_addr.raw,
							ase->type,
							ase->next_hop,
							ase->is_active,
							ase->is_bss,
							ase->ast_idx,
							ase->pdev_id,
							ase->vdev_id);
				}
			}
		}
	}
}
570#else
/* Stub used when FEATURE_WDS is compiled out: report that AST stats are
 * unavailable instead of walking (nonexistent) AST state.
 */
static void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_WDS");
}
576#endif
577
578/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700579 * dp_setup_srng - Internal function to setup SRNG rings used by data path
580 */
581static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800582 int ring_type, int ring_num, int mac_id, uint32_t num_entries)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700583{
584 void *hal_soc = soc->hal_soc;
585 uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
586 /* TODO: See if we should get align size from hal */
587 uint32_t ring_base_align = 8;
588 struct hal_srng_params ring_params;
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -0800589 uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700590
Houston Hoffman648a9182017-05-21 23:27:50 -0700591 /* TODO: Currently hal layer takes care of endianness related settings.
592 * See if these settings need to passed from DP layer
593 */
594 ring_params.flags = 0;
Houston Hoffman41b912c2017-08-30 14:27:51 -0700595 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
Yun Parkfde6b9e2017-06-26 17:13:11 -0700596 FL("Ring type: %d, num:%d"), ring_type, ring_num);
Houston Hoffman648a9182017-05-21 23:27:50 -0700597
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -0800598 num_entries = (num_entries > max_entries) ? max_entries : num_entries;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700599 srng->hal_srng = NULL;
600 srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -0700601 srng->num_entries = num_entries;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700602 srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
Dhanashri Atre57e420d2016-10-25 21:13:54 -0700603 soc->osdev, soc->osdev->dev, srng->alloc_size,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700604 &(srng->base_paddr_unaligned));
605
606 if (!srng->base_vaddr_unaligned) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530607 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
608 FL("alloc failed - ring_type: %d, ring_num %d"),
609 ring_type, ring_num);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700610 return QDF_STATUS_E_NOMEM;
611 }
612
613 ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
614 ((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
615 ring_params.ring_base_paddr = srng->base_paddr_unaligned +
616 ((unsigned long)(ring_params.ring_base_vaddr) -
617 (unsigned long)srng->base_vaddr_unaligned);
618 ring_params.num_entries = num_entries;
619
psimhac983d7e2017-07-26 15:20:07 -0700620 if (soc->intr_mode == DP_INTR_MSI) {
621 dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
622 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
623 FL("Using MSI for ring_type: %d, ring_num %d"),
624 ring_type, ring_num);
625
626 } else {
627 ring_params.msi_data = 0;
628 ring_params.msi_addr = 0;
629 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
630 FL("Skipping MSI for ring_type: %d, ring_num %d"),
631 ring_type, ring_num);
632 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700633
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +0530634 /*
635 * Setup interrupt timer and batch counter thresholds for
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700636 * interrupt mitigation based on ring type
637 */
Pamidipati, Vijay45b1df22017-06-21 03:20:25 +0530638 if (ring_type == REO_DST) {
639 ring_params.intr_timer_thres_us =
640 wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
641 ring_params.intr_batch_cntr_thres_entries =
642 wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
643 } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
644 ring_params.intr_timer_thres_us =
645 wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
646 ring_params.intr_batch_cntr_thres_entries =
647 wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
648 } else {
649 ring_params.intr_timer_thres_us =
650 wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
651 ring_params.intr_batch_cntr_thres_entries =
652 wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
653 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700654
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700655 /* Enable low threshold interrupts for rx buffer rings (regular and
656 * monitor buffer rings.
657 * TODO: See if this is required for any other ring
658 */
Karunakar Dasineni37995ac2018-02-06 12:37:30 -0800659 if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
660 (ring_type == RXDMA_MONITOR_STATUS)) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700661 /* TODO: Setting low threshold to 1/8th of ring size
662 * see if this needs to be configurable
663 */
664 ring_params.low_threshold = num_entries >> 3;
665 ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
Houston Hoffman648a9182017-05-21 23:27:50 -0700666 ring_params.intr_timer_thres_us = 0x1000;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700667 }
668
669 srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800670 mac_id, &ring_params);
Manoj Ekbote376116e2017-12-19 10:44:41 -0800671
672 if (!srng->hal_srng) {
673 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
674 srng->alloc_size,
675 srng->base_vaddr_unaligned,
676 srng->base_paddr_unaligned, 0);
677 }
678
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700679 return 0;
680}
681
682/**
683 * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
684 * Any buffers allocated and attached to ring entries are expected to be freed
685 * before calling this function.
686 */
687static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
688 int ring_type, int ring_num)
689{
690 if (!srng->hal_srng) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530691 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
692 FL("Ring type: %d, num:%d not setup"),
693 ring_type, ring_num);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700694 return;
695 }
696
697 hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
698
Dhanashri Atre57e420d2016-10-25 21:13:54 -0700699 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700700 srng->alloc_size,
701 srng->base_vaddr_unaligned,
702 srng->base_paddr_unaligned, 0);
Manoj Ekbote525bcab2017-09-01 17:23:32 -0700703 srng->hal_srng = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700704}
705
706/* TODO: Need this interface from HIF */
707void *hif_get_hal_handle(void *hif_handle);
708
709/*
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530710 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
711 * @dp_ctx: DP SOC handle
712 * @budget: Number of frames/descriptors that can be processed in one shot
713 *
714 * Return: remaining budget/quota for the soc device
715 */
Jeff Johnsonf1352572017-01-10 14:24:10 -0800716static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530717{
718 struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
719 struct dp_soc *soc = int_ctx->soc;
720 int ring = 0;
721 uint32_t work_done = 0;
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530722 int budget = dp_budget;
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530723 uint8_t tx_mask = int_ctx->tx_ring_mask;
724 uint8_t rx_mask = int_ctx->rx_ring_mask;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530725 uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
726 uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -0800727 uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530728 uint32_t remaining_quota = dp_budget;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -0700729 struct dp_pdev *pdev = NULL;
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530730
731 /* Process Tx completion interrupts first to return back buffers */
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530732 while (tx_mask) {
733 if (tx_mask & 0x1) {
Houston Hoffmanae850c62017-08-11 16:47:50 -0700734 work_done = dp_tx_comp_handler(soc,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530735 soc->tx_comp_ring[ring].hal_srng,
736 remaining_quota);
737
Houston Hoffmanae850c62017-08-11 16:47:50 -0700738 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
739 "tx mask 0x%x ring %d, budget %d, work_done %d",
740 tx_mask, ring, budget, work_done);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530741
742 budget -= work_done;
743 if (budget <= 0)
744 goto budget_done;
745
746 remaining_quota = budget;
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530747 }
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530748 tx_mask = tx_mask >> 1;
749 ring++;
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530750 }
751
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530752
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530753 /* Process REO Exception ring interrupt */
754 if (rx_err_mask) {
755 work_done = dp_rx_err_process(soc,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530756 soc->reo_exception_ring.hal_srng,
757 remaining_quota);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530758
Houston Hoffmanae850c62017-08-11 16:47:50 -0700759 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
760 "REO Exception Ring: work_done %d budget %d",
761 work_done, budget);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530762
763 budget -= work_done;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530764 if (budget <= 0) {
765 goto budget_done;
766 }
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530767 remaining_quota = budget;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530768 }
769
770 /* Process Rx WBM release ring interrupt */
771 if (rx_wbm_rel_mask) {
772 work_done = dp_rx_wbm_err_process(soc,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530773 soc->rx_rel_ring.hal_srng, remaining_quota);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530774
Houston Hoffmanae850c62017-08-11 16:47:50 -0700775 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
776 "WBM Release Ring: work_done %d budget %d",
777 work_done, budget);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530778
779 budget -= work_done;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530780 if (budget <= 0) {
781 goto budget_done;
782 }
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530783 remaining_quota = budget;
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +0530784 }
785
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530786 /* Process Rx interrupts */
787 if (rx_mask) {
788 for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
789 if (rx_mask & (1 << ring)) {
Houston Hoffmanae850c62017-08-11 16:47:50 -0700790 work_done = dp_rx_process(int_ctx,
Leo Chang5ea93a42016-11-03 12:39:49 -0700791 soc->reo_dest_ring[ring].hal_srng,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530792 remaining_quota);
793
Houston Hoffmanae850c62017-08-11 16:47:50 -0700794 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
795 "rx mask 0x%x ring %d, work_done %d budget %d",
796 rx_mask, ring, work_done, budget);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530797
798 budget -= work_done;
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530799 if (budget <= 0)
800 goto budget_done;
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530801 remaining_quota = budget;
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530802 }
803 }
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -0800804 for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) {
805 /* Need to check on this, why is required */
806 work_done = dp_rxdma_err_process(soc, ring,
807 remaining_quota);
808 budget -= work_done;
809 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530810 }
811
Karunakar Dasinenia0f09ea2016-11-21 17:41:31 -0800812 if (reo_status_mask)
813 dp_reo_status_ring_handler(soc);
814
Karunakar Dasineni10185472017-06-19 16:32:06 -0700815 /* Process LMAC interrupts */
Kai Chen6eca1a62017-01-12 10:17:53 -0800816 for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -0700817 pdev = soc->pdev_list[ring];
818 if (pdev == NULL)
Karunakar Dasineni10185472017-06-19 16:32:06 -0700819 continue;
Kai Chen6eca1a62017-01-12 10:17:53 -0800820 if (int_ctx->rx_mon_ring_mask & (1 << ring)) {
Houston Hoffmanae850c62017-08-11 16:47:50 -0700821 work_done = dp_mon_process(soc, ring, remaining_quota);
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530822 budget -= work_done;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -0700823 if (budget <= 0)
824 goto budget_done;
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530825 remaining_quota = budget;
Kai Chen6eca1a62017-01-12 10:17:53 -0800826 }
Pramod Simhae382ff82017-06-05 18:09:26 -0700827
828 if (int_ctx->rxdma2host_ring_mask & (1 << ring)) {
Houston Hoffmanae850c62017-08-11 16:47:50 -0700829 work_done = dp_rxdma_err_process(soc, ring,
Pamidipati, Vijay9e340252017-08-14 16:24:17 +0530830 remaining_quota);
Pramod Simhae382ff82017-06-05 18:09:26 -0700831 budget -= work_done;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -0700832 if (budget <= 0)
833 goto budget_done;
834 remaining_quota = budget;
835 }
836
837 if (int_ctx->host2rxdma_ring_mask & (1 << ring)) {
838 union dp_rx_desc_list_elem_t *desc_list = NULL;
839 union dp_rx_desc_list_elem_t *tail = NULL;
840 struct dp_srng *rx_refill_buf_ring =
841 &pdev->rx_refill_buf_ring;
842
843 DP_STATS_INC(pdev, replenish.low_thresh_intrs, 1);
844 dp_rx_buffers_replenish(soc, ring,
845 rx_refill_buf_ring,
846 &soc->rx_desc_buf[ring], 0,
847 &desc_list, &tail, HAL_RX_BUF_RBM_SW3_BM);
Pramod Simhae382ff82017-06-05 18:09:26 -0700848 }
Kai Chen6eca1a62017-01-12 10:17:53 -0800849 }
850
Dhanashri Atre0da31222017-03-23 12:30:58 -0700851 qdf_lro_flush(int_ctx->lro_ctx);
852
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530853budget_done:
854 return dp_budget - budget;
855}
856
psimhac983d7e2017-07-26 15:20:07 -0700857#ifdef DP_INTR_POLL_BASED
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530858/* dp_interrupt_timer()- timer poll for interrupts
859 *
860 * @arg: SoC Handle
861 *
862 * Return:
863 *
864 */
Jeff Johnsonf1352572017-01-10 14:24:10 -0800865static void dp_interrupt_timer(void *arg)
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530866{
867 struct dp_soc *soc = (struct dp_soc *) arg;
868 int i;
869
Ravi Joshi86e98262017-03-01 13:47:03 -0800870 if (qdf_atomic_read(&soc->cmn_init_done)) {
871 for (i = 0;
872 i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
873 dp_service_srngs(&soc->intr_ctx[i], 0xffff);
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530874
Ravi Joshi86e98262017-03-01 13:47:03 -0800875 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
876 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530877}
878
879/*
psimhac983d7e2017-07-26 15:20:07 -0700880 * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530881 * @txrx_soc: DP SOC handle
882 *
883 * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
884 * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
885 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
886 *
887 * Return: 0 for success. nonzero for failure.
888 */
psimhac983d7e2017-07-26 15:20:07 -0700889static QDF_STATUS dp_soc_interrupt_attach_poll(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530890{
891 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
892 int i;
893
psimhac983d7e2017-07-26 15:20:07 -0700894 soc->intr_mode = DP_INTR_POLL;
895
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530896 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
Houston Hoffman648a9182017-05-21 23:27:50 -0700897 soc->intr_ctx[i].dp_intr_id = i;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -0700898 soc->intr_ctx[i].tx_ring_mask =
899 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
900 soc->intr_ctx[i].rx_ring_mask =
901 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
902 soc->intr_ctx[i].rx_mon_ring_mask =
903 wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
904 soc->intr_ctx[i].rx_err_ring_mask =
905 wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
906 soc->intr_ctx[i].rx_wbm_rel_ring_mask =
907 wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
908 soc->intr_ctx[i].reo_status_ring_mask =
909 wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
910 soc->intr_ctx[i].rxdma2host_ring_mask =
911 wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530912 soc->intr_ctx[i].soc = soc;
Dhanashri Atre0da31222017-03-23 12:30:58 -0700913 soc->intr_ctx[i].lro_ctx = qdf_lro_init();
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530914 }
915
916 qdf_timer_init(soc->osdev, &soc->int_timer,
917 dp_interrupt_timer, (void *)soc,
918 QDF_TIMER_TYPE_WAKE_APPS);
Vijay Pamidipatib775e132016-10-19 21:19:52 +0530919
920 return QDF_STATUS_SUCCESS;
921}
D Harilakshmi5da9ee72017-10-04 16:14:12 +0530922
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -0700923#if defined(CONFIG_MCL)
D Harilakshmi5da9ee72017-10-04 16:14:12 +0530924extern int con_mode_monitor;
925static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
D Harilakshmi5da9ee72017-10-04 16:14:12 +0530926/*
927 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
928 * @txrx_soc: DP SOC handle
929 *
930 * Call the appropriate attach function based on the mode of operation.
931 * This is a WAR for enabling monitor mode.
932 *
933 * Return: 0 for success. nonzero for failure.
934 */
935static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
936{
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -0700937 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
938
939 if (!(soc->wlan_cfg_ctx->napi_enabled) ||
940 con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) {
Mohit Khanna9a6fdd52017-12-12 10:55:48 +0800941 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
942 "%s: Poll mode", __func__);
D Harilakshmi5da9ee72017-10-04 16:14:12 +0530943 return dp_soc_interrupt_attach_poll(txrx_soc);
944 } else {
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -0700945
Mohit Khanna9a6fdd52017-12-12 10:55:48 +0800946 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
947 "%s: Interrupt mode", __func__);
D Harilakshmi5da9ee72017-10-04 16:14:12 +0530948 return dp_soc_interrupt_attach(txrx_soc);
949 }
950}
951#else
952static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
953{
954 return dp_soc_interrupt_attach_poll(txrx_soc);
955}
956#endif
psimhac983d7e2017-07-26 15:20:07 -0700957#endif
Houston Hoffman648a9182017-05-21 23:27:50 -0700958
959static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
960 int intr_ctx_num, int *irq_id_map, int *num_irq_r)
961{
962 int j;
963 int num_irq = 0;
964
965 int tx_mask =
966 wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
967 int rx_mask =
968 wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
969 int rx_mon_mask =
970 wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
971 int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
972 soc->wlan_cfg_ctx, intr_ctx_num);
973 int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
974 soc->wlan_cfg_ctx, intr_ctx_num);
975 int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
976 soc->wlan_cfg_ctx, intr_ctx_num);
Karunakar Dasineniea027c52017-09-20 16:27:46 -0700977 int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
978 soc->wlan_cfg_ctx, intr_ctx_num);
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -0700979 int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
980 soc->wlan_cfg_ctx, intr_ctx_num);
Houston Hoffman648a9182017-05-21 23:27:50 -0700981
982 for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
983
984 if (tx_mask & (1 << j)) {
985 irq_id_map[num_irq++] =
986 (wbm2host_tx_completions_ring1 - j);
987 }
988
989 if (rx_mask & (1 << j)) {
990 irq_id_map[num_irq++] =
991 (reo2host_destination_ring1 - j);
992 }
993
Karunakar Dasineniea027c52017-09-20 16:27:46 -0700994 if (rxdma2host_ring_mask & (1 << j)) {
995 irq_id_map[num_irq++] =
996 rxdma2host_destination_ring_mac1 -
997 wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
998 }
999
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001000 if (host2rxdma_ring_mask & (1 << j)) {
1001 irq_id_map[num_irq++] =
1002 host2rxdma_host_buf_ring_mac1 -
1003 wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1004 }
1005
Houston Hoffman648a9182017-05-21 23:27:50 -07001006 if (rx_mon_mask & (1 << j)) {
1007 irq_id_map[num_irq++] =
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001008 ppdu_end_interrupts_mac1 -
1009 wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
Karunakar Dasineni37995ac2018-02-06 12:37:30 -08001010 irq_id_map[num_irq++] =
1011 rxdma2host_monitor_status_ring_mac1 -
1012 wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
Houston Hoffman648a9182017-05-21 23:27:50 -07001013 }
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001014
Houston Hoffman648a9182017-05-21 23:27:50 -07001015 if (rx_wbm_rel_ring_mask & (1 << j))
1016 irq_id_map[num_irq++] = wbm2host_rx_release;
1017
1018 if (rx_err_ring_mask & (1 << j))
1019 irq_id_map[num_irq++] = reo2host_exception;
1020
1021 if (reo_status_ring_mask & (1 << j))
1022 irq_id_map[num_irq++] = reo2host_status;
1023
1024 }
1025 *num_irq_r = num_irq;
1026}
1027
1028static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1029 int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1030 int msi_vector_count, int msi_vector_start)
1031{
1032 int tx_mask = wlan_cfg_get_tx_ring_mask(
1033 soc->wlan_cfg_ctx, intr_ctx_num);
1034 int rx_mask = wlan_cfg_get_rx_ring_mask(
1035 soc->wlan_cfg_ctx, intr_ctx_num);
1036 int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1037 soc->wlan_cfg_ctx, intr_ctx_num);
1038 int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1039 soc->wlan_cfg_ctx, intr_ctx_num);
1040 int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1041 soc->wlan_cfg_ctx, intr_ctx_num);
1042 int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1043 soc->wlan_cfg_ctx, intr_ctx_num);
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001044 int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1045 soc->wlan_cfg_ctx, intr_ctx_num);
Houston Hoffman648a9182017-05-21 23:27:50 -07001046
1047 unsigned int vector =
1048 (intr_ctx_num % msi_vector_count) + msi_vector_start;
1049 int num_irq = 0;
1050
psimhac983d7e2017-07-26 15:20:07 -07001051 soc->intr_mode = DP_INTR_MSI;
1052
Houston Hoffman648a9182017-05-21 23:27:50 -07001053 if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001054 rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
Houston Hoffman648a9182017-05-21 23:27:50 -07001055 irq_id_map[num_irq++] =
1056 pld_get_msi_irq(soc->osdev->dev, vector);
1057
1058 *num_irq_r = num_irq;
1059}
1060
1061static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1062 int *irq_id_map, int *num_irq)
1063{
1064 int msi_vector_count, ret;
1065 uint32_t msi_base_data, msi_vector_start;
1066
1067 ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1068 &msi_vector_count,
1069 &msi_base_data,
1070 &msi_vector_start);
1071 if (ret)
1072 return dp_soc_interrupt_map_calculate_integrated(soc,
1073 intr_ctx_num, irq_id_map, num_irq);
1074
1075 else
1076 dp_soc_interrupt_map_calculate_msi(soc,
1077 intr_ctx_num, irq_id_map, num_irq,
1078 msi_vector_count, msi_vector_start);
1079}
1080
/*
 * dp_soc_interrupt_attach() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	int i = 0;
	int num_irq = 0;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		int ret = 0;

		/* Map of IRQ ids registered with one interrupt context */
		int irq_id_map[HIF_MAX_GRP_IRQ];

		/* Per-context ring masks come straight from SOC cfg */
		int tx_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_mon_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		int reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		int rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		int host2rxdma_ring_mask =
			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);


		/* Cache the masks in the context so dp_service_srngs()
		 * knows which rings this context owns.
		 */
		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask = tx_mask;
		soc->intr_ctx[i].rx_ring_mask = rx_mask;
		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;

		soc->intr_ctx[i].soc = soc;

		num_irq = 0;

		/* Translate the ring masks into the list of HW IRQ ids
		 * (MSI or integrated) this context must be bound to.
		 */
		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
			&num_irq);

		/* Bind the IRQ list to dp_service_srngs() via a HIF NAPI
		 * execution group named "dp_intr".
		 */
		ret = hif_register_ext_group(soc->hif_handle,
			num_irq, irq_id_map, dp_service_srngs,
			&soc->intr_ctx[i], "dp_intr",
			HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);

		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("failed, ret = %d"), ret);

			/* NOTE(review): groups registered in earlier
			 * iterations and their lro_ctx allocations are not
			 * unwound here -- confirm the caller invokes
			 * dp_soc_interrupt_detach() on failure.
			 */
			return QDF_STATUS_E_FAILURE;
		}
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
	}

	hif_configure_ext_group_interrupts(soc->hif_handle);

	return QDF_STATUS_SUCCESS;
}
1157
1158/*
1159 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1160 * @txrx_soc: DP SOC handle
1161 *
1162 * Return: void
1163 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001164static void dp_soc_interrupt_detach(void *txrx_soc)
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301165{
1166 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
Leo Chang5ea93a42016-11-03 12:39:49 -07001167 int i;
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301168
psimhac983d7e2017-07-26 15:20:07 -07001169 if (soc->intr_mode == DP_INTR_POLL) {
1170 qdf_timer_stop(&soc->int_timer);
1171 qdf_timer_free(&soc->int_timer);
psimhaa079b8c2017-08-02 17:27:14 -07001172 } else {
1173 hif_deregister_exec_group(soc->hif_handle, "dp_intr");
psimhac983d7e2017-07-26 15:20:07 -07001174 }
1175
Leo Chang5ea93a42016-11-03 12:39:49 -07001176 for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1177 soc->intr_ctx[i].tx_ring_mask = 0;
1178 soc->intr_ctx[i].rx_ring_mask = 0;
1179 soc->intr_ctx[i].rx_mon_ring_mask = 0;
Houston Hoffman648a9182017-05-21 23:27:50 -07001180 soc->intr_ctx[i].rx_err_ring_mask = 0;
1181 soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1182 soc->intr_ctx[i].reo_status_ring_mask = 0;
Karunakar Dasineniea027c52017-09-20 16:27:46 -07001183 soc->intr_ctx[i].rxdma2host_ring_mask = 0;
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001184 soc->intr_ctx[i].host2rxdma_ring_mask = 0;
Houston Hoffman648a9182017-05-21 23:27:50 -07001185
Dhanashri Atre0da31222017-03-23 12:30:58 -07001186 qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
Leo Chang5ea93a42016-11-03 12:39:49 -07001187 }
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301188}
Vijay Pamidipatib775e132016-10-19 21:19:52 +05301189
/* Sizing heuristics used by dp_hw_link_desc_pool_setup() to budget the
 * common WBM link descriptor pool.
 */
#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4
1195
1196/*
1197 * Allocate and setup link descriptor pool that will be used by HW for
1198 * various link and queue descriptors and managed by WBM
1199 */
1200static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1201{
1202 int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1203 int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1204 uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1205 uint32_t num_mpdus_per_link_desc =
1206 hal_num_mpdus_per_link_desc(soc->hal_soc);
1207 uint32_t num_msdus_per_link_desc =
1208 hal_num_msdus_per_link_desc(soc->hal_soc);
1209 uint32_t num_mpdu_links_per_queue_desc =
1210 hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1211 uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1212 uint32_t total_link_descs, total_mem_size;
1213 uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1214 uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1215 uint32_t num_link_desc_banks;
1216 uint32_t last_bank_size = 0;
1217 uint32_t entry_size, num_entries;
1218 int i;
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001219 uint32_t desc_id = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001220
1221 /* Only Tx queue descriptors are allocated from common link descriptor
1222 * pool Rx queue descriptors are not included in this because (REO queue
1223 * extension descriptors) they are expected to be allocated contiguously
1224 * with REO queue descriptors
1225 */
1226 num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1227 AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1228
1229 num_mpdu_queue_descs = num_mpdu_link_descs /
1230 num_mpdu_links_per_queue_desc;
1231
1232 num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1233 AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
1234 num_msdus_per_link_desc;
1235
1236 num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1237 AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
1238
1239 num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
1240 num_tx_msdu_link_descs + num_rx_msdu_link_descs;
1241
1242 /* Round up to power of 2 */
1243 total_link_descs = 1;
1244 while (total_link_descs < num_entries)
1245 total_link_descs <<= 1;
1246
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301247 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1248 FL("total_link_descs: %u, link_desc_size: %d"),
1249 total_link_descs, link_desc_size);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001250 total_mem_size = total_link_descs * link_desc_size;
1251
1252 total_mem_size += link_desc_align;
1253
1254 if (total_mem_size <= max_alloc_size) {
1255 num_link_desc_banks = 0;
1256 last_bank_size = total_mem_size;
1257 } else {
1258 num_link_desc_banks = (total_mem_size) /
1259 (max_alloc_size - link_desc_align);
1260 last_bank_size = total_mem_size %
1261 (max_alloc_size - link_desc_align);
1262 }
1263
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301264 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
1265 FL("total_mem_size: %d, num_link_desc_banks: %u"),
1266 total_mem_size, num_link_desc_banks);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001267
1268 for (i = 0; i < num_link_desc_banks; i++) {
1269 soc->link_desc_banks[i].base_vaddr_unaligned =
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001270 qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001271 max_alloc_size,
1272 &(soc->link_desc_banks[i].base_paddr_unaligned));
1273 soc->link_desc_banks[i].size = max_alloc_size;
1274
1275 soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
1276 soc->link_desc_banks[i].base_vaddr_unaligned) +
1277 ((unsigned long)(
1278 soc->link_desc_banks[i].base_vaddr_unaligned) %
1279 link_desc_align));
1280
1281 soc->link_desc_banks[i].base_paddr = (unsigned long)(
1282 soc->link_desc_banks[i].base_paddr_unaligned) +
1283 ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1284 (unsigned long)(
1285 soc->link_desc_banks[i].base_vaddr_unaligned));
1286
1287 if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301288 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1289 FL("Link descriptor memory alloc failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001290 goto fail;
1291 }
1292 }
1293
1294 if (last_bank_size) {
1295 /* Allocate last bank in case total memory required is not exact
1296 * multiple of max_alloc_size
1297 */
1298 soc->link_desc_banks[i].base_vaddr_unaligned =
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001299 qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001300 last_bank_size,
1301 &(soc->link_desc_banks[i].base_paddr_unaligned));
1302 soc->link_desc_banks[i].size = last_bank_size;
1303
1304 soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
1305 (soc->link_desc_banks[i].base_vaddr_unaligned) +
1306 ((unsigned long)(
1307 soc->link_desc_banks[i].base_vaddr_unaligned) %
1308 link_desc_align));
1309
1310 soc->link_desc_banks[i].base_paddr =
1311 (unsigned long)(
1312 soc->link_desc_banks[i].base_paddr_unaligned) +
1313 ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
1314 (unsigned long)(
1315 soc->link_desc_banks[i].base_vaddr_unaligned));
1316 }
1317
1318
1319 /* Allocate and setup link descriptor idle list for HW internal use */
1320 entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
1321 total_mem_size = entry_size * total_link_descs;
1322
1323 if (total_mem_size <= max_alloc_size) {
1324 void *desc;
1325
1326 if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
1327 WBM_IDLE_LINK, 0, 0, total_link_descs)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301328 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1329 FL("Link desc idle ring setup failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001330 goto fail;
1331 }
1332
1333 hal_srng_access_start_unlocked(soc->hal_soc,
1334 soc->wbm_idle_link_ring.hal_srng);
1335
1336 for (i = 0; i < MAX_LINK_DESC_BANKS &&
1337 soc->link_desc_banks[i].base_paddr; i++) {
1338 uint32_t num_entries = (soc->link_desc_banks[i].size -
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07001339 ((unsigned long)(
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001340 soc->link_desc_banks[i].base_vaddr) -
1341 (unsigned long)(
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07001342 soc->link_desc_banks[i].base_vaddr_unaligned)))
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001343 / link_desc_size;
1344 unsigned long paddr = (unsigned long)(
1345 soc->link_desc_banks[i].base_paddr);
1346
1347 while (num_entries && (desc = hal_srng_src_get_next(
1348 soc->hal_soc,
1349 soc->wbm_idle_link_ring.hal_srng))) {
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001350 hal_set_link_desc_addr(desc,
1351 LINK_DESC_COOKIE(desc_id, i), paddr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001352 num_entries--;
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001353 desc_id++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001354 paddr += link_desc_size;
1355 }
1356 }
1357 hal_srng_access_end_unlocked(soc->hal_soc,
1358 soc->wbm_idle_link_ring.hal_srng);
1359 } else {
1360 uint32_t num_scatter_bufs;
1361 uint32_t num_entries_per_buf;
1362 uint32_t rem_entries;
1363 uint8_t *scatter_buf_ptr;
1364 uint16_t scatter_buf_num;
1365
1366 soc->wbm_idle_scatter_buf_size =
1367 hal_idle_list_scatter_buf_size(soc->hal_soc);
1368 num_entries_per_buf = hal_idle_scatter_buf_num_entries(
1369 soc->hal_soc, soc->wbm_idle_scatter_buf_size);
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001370 num_scatter_bufs = hal_idle_list_num_scatter_bufs(
1371 soc->hal_soc, total_mem_size,
1372 soc->wbm_idle_scatter_buf_size);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001373
1374 for (i = 0; i < num_scatter_bufs; i++) {
1375 soc->wbm_idle_scatter_buf_base_vaddr[i] =
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001376 qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001377 soc->wbm_idle_scatter_buf_size,
1378 &(soc->wbm_idle_scatter_buf_base_paddr[i]));
1379 if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301380 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001381 QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301382 FL("Scatter list memory alloc failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001383 goto fail;
1384 }
1385 }
1386
1387 /* Populate idle list scatter buffers with link descriptor
1388 * pointers
1389 */
1390 scatter_buf_num = 0;
1391 scatter_buf_ptr = (uint8_t *)(
1392 soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
1393 rem_entries = num_entries_per_buf;
1394
1395 for (i = 0; i < MAX_LINK_DESC_BANKS &&
1396 soc->link_desc_banks[i].base_paddr; i++) {
1397 uint32_t num_link_descs =
1398 (soc->link_desc_banks[i].size -
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07001399 ((unsigned long)(
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001400 soc->link_desc_banks[i].base_vaddr) -
1401 (unsigned long)(
Karunakar Dasinenic45b01e2017-06-07 11:38:01 -07001402 soc->link_desc_banks[i].base_vaddr_unaligned)))
1403 / link_desc_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001404 unsigned long paddr = (unsigned long)(
1405 soc->link_desc_banks[i].base_paddr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001406
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001407 while (num_link_descs) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001408 hal_set_link_desc_addr((void *)scatter_buf_ptr,
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001409 LINK_DESC_COOKIE(desc_id, i), paddr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001410 num_link_descs--;
Karunakar Dasinenidbaf4be2017-07-19 18:12:43 -07001411 desc_id++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001412 paddr += link_desc_size;
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001413 rem_entries--;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001414 if (rem_entries) {
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001415 scatter_buf_ptr += entry_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001416 } else {
1417 rem_entries = num_entries_per_buf;
1418 scatter_buf_num++;
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001419
1420 if (scatter_buf_num >= num_scatter_bufs)
1421 break;
1422
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001423 scatter_buf_ptr = (uint8_t *)(
1424 soc->wbm_idle_scatter_buf_base_vaddr[
1425 scatter_buf_num]);
1426 }
1427 }
1428 }
1429 /* Setup link descriptor idle list in HW */
1430 hal_setup_link_idle_list(soc->hal_soc,
1431 soc->wbm_idle_scatter_buf_base_paddr,
1432 soc->wbm_idle_scatter_buf_base_vaddr,
1433 num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
Leo Chang5ea93a42016-11-03 12:39:49 -07001434 (uint32_t)(scatter_buf_ptr -
Pramod Simhaccb15fb2017-06-19 12:21:13 -07001435 (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
1436 scatter_buf_num-1])), total_link_descs);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001437 }
1438 return 0;
1439
1440fail:
1441 if (soc->wbm_idle_link_ring.hal_srng) {
1442 dp_srng_cleanup(soc->hal_soc, &soc->wbm_idle_link_ring,
1443 WBM_IDLE_LINK, 0);
1444 }
1445
1446 for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1447 if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001448 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001449 soc->wbm_idle_scatter_buf_size,
1450 soc->wbm_idle_scatter_buf_base_vaddr[i],
1451 soc->wbm_idle_scatter_buf_base_paddr[i], 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001452 soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001453 }
1454 }
1455
1456 for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1457 if (soc->link_desc_banks[i].base_vaddr_unaligned) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001458 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001459 soc->link_desc_banks[i].size,
1460 soc->link_desc_banks[i].base_vaddr_unaligned,
1461 soc->link_desc_banks[i].base_paddr_unaligned,
1462 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001463 soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001464 }
1465 }
1466 return QDF_STATUS_E_FAILURE;
1467}
1468
1469/*
1470 * Free link descriptor pool that was setup HW
1471 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08001472static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001473{
1474 int i;
1475
1476 if (soc->wbm_idle_link_ring.hal_srng) {
Manoj Ekbote525bcab2017-09-01 17:23:32 -07001477 dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001478 WBM_IDLE_LINK, 0);
1479 }
1480
1481 for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
1482 if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001483 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001484 soc->wbm_idle_scatter_buf_size,
1485 soc->wbm_idle_scatter_buf_base_vaddr[i],
1486 soc->wbm_idle_scatter_buf_base_paddr[i], 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001487 soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001488 }
1489 }
1490
1491 for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
1492 if (soc->link_desc_banks[i].base_vaddr_unaligned) {
Dhanashri Atre57e420d2016-10-25 21:13:54 -07001493 qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001494 soc->link_desc_banks[i].size,
1495 soc->link_desc_banks[i].base_vaddr_unaligned,
1496 soc->link_desc_banks[i].base_paddr_unaligned,
1497 0);
Karunakar Dasinenib71ad042018-01-22 16:50:20 -08001498 soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001499 }
1500 }
1501}
1502
1503/* TODO: Following should be configurable */
1504#define WBM_RELEASE_RING_SIZE 64
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001505#define TCL_CMD_RING_SIZE 32
1506#define TCL_STATUS_RING_SIZE 32
Ajit Pal Singhd27d7462017-07-07 17:17:43 +05301507#if defined(QCA_WIFI_QCA6290)
1508#define REO_DST_RING_SIZE 1024
1509#else
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001510#define REO_DST_RING_SIZE 2048
Ajit Pal Singhd27d7462017-07-07 17:17:43 +05301511#endif
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001512#define REO_REINJECT_RING_SIZE 32
Karunakar Dasinenid0ea21f2017-01-31 22:58:15 -08001513#define RX_RELEASE_RING_SIZE 1024
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001514#define REO_EXCEPTION_RING_SIZE 128
1515#define REO_CMD_RING_SIZE 32
1516#define REO_STATUS_RING_SIZE 32
Dhanashri Atred4032ab2017-01-17 15:05:41 -08001517#define RXDMA_BUF_RING_SIZE 1024
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001518#define RXDMA_REFILL_RING_SIZE 4096
Kai Chenad516ae2017-09-08 18:35:47 -07001519#define RXDMA_MONITOR_BUF_RING_SIZE 4096
1520#define RXDMA_MONITOR_DST_RING_SIZE 2048
Kai Chen6eca1a62017-01-12 10:17:53 -08001521#define RXDMA_MONITOR_STATUS_RING_SIZE 1024
Karunakar Dasineni37995ac2018-02-06 12:37:30 -08001522#define RXDMA_MONITOR_DESC_RING_SIZE 4096
Pramod Simhae382ff82017-06-05 18:09:26 -07001523#define RXDMA_ERR_DST_RING_SIZE 1024
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001524
1525/*
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301526 * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
1527 * @soc: Datapath SOC handle
1528 *
1529 * This is a timer function used to age out stale WDS nodes from
1530 * AST table
1531 */
1532#ifdef FEATURE_WDS
1533static void dp_wds_aging_timer_fn(void *soc_hdl)
1534{
1535 struct dp_soc *soc = (struct dp_soc *) soc_hdl;
1536 struct dp_pdev *pdev;
1537 struct dp_vdev *vdev;
1538 struct dp_peer *peer;
Tallapragada Kalyan1c14d5d2017-07-26 00:31:35 +05301539 struct dp_ast_entry *ase, *temp_ase;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301540 int i;
1541
1542 qdf_spin_lock_bh(&soc->ast_lock);
1543
1544 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1545 pdev = soc->pdev_list[i];
1546 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1547 DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
Tallapragada Kalyan1c14d5d2017-07-26 00:31:35 +05301548 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301549 /*
1550 * Do not expire static ast entries
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05301551 * and HM WDS entries
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301552 */
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05301553 if (ase->type ==
1554 CDP_TXRX_AST_TYPE_STATIC ||
1555 ase->type ==
1556 CDP_TXRX_AST_TYPE_WDS_HM)
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301557 continue;
1558
1559 if (ase->is_active) {
1560 ase->is_active = FALSE;
1561 continue;
1562 }
1563
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05301564 DP_STATS_INC(soc, ast.aged_out, 1);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301565 dp_peer_del_ast(soc, ase);
1566 }
1567 }
1568 }
1569
1570 }
1571
1572 qdf_spin_unlock_bh(&soc->ast_lock);
1573
1574 if (qdf_atomic_read(&soc->cmn_init_done))
1575 qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1576}
1577
1578/*
1579 * dp_soc_wds_attach() - Setup WDS timer and AST table
1580 * @soc: Datapath SOC handle
1581 *
1582 * Return: None
1583 */
1584static void dp_soc_wds_attach(struct dp_soc *soc)
1585{
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301586 qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
1587 dp_wds_aging_timer_fn, (void *)soc,
1588 QDF_TIMER_TYPE_WAKE_APPS);
1589
1590 qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
1591}
1592
1593/*
1594 * dp_soc_wds_detach() - Detach WDS data structures and timers
1595 * @txrx_soc: DP SOC handle
1596 *
1597 * Return: None
1598 */
1599static void dp_soc_wds_detach(struct dp_soc *soc)
1600{
1601 qdf_timer_stop(&soc->wds_aging_timer);
1602 qdf_timer_free(&soc->wds_aging_timer);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05301603}
1604#else
1605static void dp_soc_wds_attach(struct dp_soc *soc)
1606{
1607}
1608
1609static void dp_soc_wds_detach(struct dp_soc *soc)
1610{
1611}
1612#endif
1613
1614/*
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05301615 * dp_soc_reset_ring_map() - Reset cpu ring map
1616 * @soc: Datapath soc handler
1617 *
1618 * This api resets the default cpu ring map
1619 */
1620
1621static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
1622{
1623 uint8_t i;
1624 int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1625
1626 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
1627 if (nss_config == 1) {
1628 /*
1629 * Setting Tx ring map for one nss offloaded radio
1630 */
1631 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
1632 } else if (nss_config == 2) {
1633 /*
1634 * Setting Tx ring for two nss offloaded radios
1635 */
1636 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
1637 } else {
1638 /*
1639 * Setting Tx ring map for all nss offloaded radios
1640 */
1641 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
1642 }
1643 }
1644}
1645
Aniruddha Paule3a03342017-09-19 16:42:10 +05301646/*
1647 * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
1648 * @dp_soc - DP soc handle
1649 * @ring_type - ring type
1650 * @ring_num - ring_num
1651 *
1652 * return 0 or 1
1653 */
1654static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
1655{
1656 uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1657 uint8_t status = 0;
1658
1659 switch (ring_type) {
1660 case WBM2SW_RELEASE:
1661 case REO_DST:
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001662 case RXDMA_BUF:
Aniruddha Paule3a03342017-09-19 16:42:10 +05301663 status = ((nss_config) & (1 << ring_num));
1664 break;
1665 default:
1666 break;
1667 }
1668
1669 return status;
1670}
1671
1672/*
1673 * dp_soc_reset_intr_mask() - reset interrupt mask
1674 * @dp_soc - DP Soc handle
1675 *
1676 * Return: Return void
1677 */
1678static void dp_soc_reset_intr_mask(struct dp_soc *soc)
1679{
1680 uint8_t j;
1681 int *grp_mask = NULL;
1682 int group_number, mask, num_ring;
1683
1684 /* number of tx ring */
1685 num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
1686
1687 /*
1688 * group mask for tx completion ring.
1689 */
1690 grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1691
1692 /* loop and reset the mask for only offloaded ring */
1693 for (j = 0; j < num_ring; j++) {
1694 if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
1695 continue;
1696 }
1697
1698 /*
1699 * Group number corresponding to tx offloaded ring.
1700 */
1701 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1702 if (group_number < 0) {
1703 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001704 FL("ring not part of any group; ring_type: %d,ring_num %d"),
Aniruddha Paule3a03342017-09-19 16:42:10 +05301705 WBM2SW_RELEASE, j);
1706 return;
1707 }
1708
1709 /* reset the tx mask for offloaded ring */
1710 mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
1711 mask &= (~(1 << j));
1712
1713 /*
1714 * reset the interrupt mask for offloaded ring.
1715 */
1716 wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1717 }
1718
1719 /* number of rx rings */
1720 num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
1721
1722 /*
1723 * group mask for reo destination ring.
1724 */
1725 grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1726
1727 /* loop and reset the mask for only offloaded ring */
1728 for (j = 0; j < num_ring; j++) {
1729 if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
1730 continue;
1731 }
1732
1733 /*
1734 * Group number corresponding to rx offloaded ring.
1735 */
1736 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1737 if (group_number < 0) {
1738 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001739 FL("ring not part of any group; ring_type: %d,ring_num %d"),
Aniruddha Paule3a03342017-09-19 16:42:10 +05301740 REO_DST, j);
1741 return;
1742 }
1743
1744 /* set the interrupt mask for offloaded ring */
1745 mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
1746 mask &= (~(1 << j));
1747
1748 /*
1749 * set the interrupt mask to zero for rx offloaded radio.
1750 */
1751 wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
1752 }
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07001753
1754 /*
1755 * group mask for Rx buffer refill ring
1756 */
1757 grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1758
1759 /* loop and reset the mask for only offloaded ring */
1760 for (j = 0; j < MAX_PDEV_CNT; j++) {
1761 if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
1762 continue;
1763 }
1764
1765 /*
1766 * Group number corresponding to rx offloaded ring.
1767 */
1768 group_number = dp_srng_find_ring_in_mask(j, grp_mask);
1769 if (group_number < 0) {
1770 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1771 FL("ring not part of any group; ring_type: %d,ring_num %d"),
1772 REO_DST, j);
1773 return;
1774 }
1775
1776 /* set the interrupt mask for offloaded ring */
1777 mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1778 group_number);
1779 mask &= (~(1 << j));
1780
1781 /*
1782 * set the interrupt mask to zero for rx offloaded radio.
1783 */
1784 wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
1785 group_number, mask);
1786 }
Aniruddha Paule3a03342017-09-19 16:42:10 +05301787}
1788
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05301789#ifdef IPA_OFFLOAD
1790/**
1791 * dp_reo_remap_config() - configure reo remap register value based
1792 * nss configuration.
1793 * based on offload_radio value below remap configuration
1794 * get applied.
1795 * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
1796 * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
1797 * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
1798 * 3 - both Radios handled by NSS (remap not required)
1799 * 4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
1800 *
1801 * @remap1: output parameter indicates reo remap 1 register value
1802 * @remap2: output parameter indicates reo remap 2 register value
1803 * Return: bool type, true if remap is configured else false.
1804 */
1805static bool dp_reo_remap_config(struct dp_soc *soc,
1806 uint32_t *remap1,
1807 uint32_t *remap2)
1808{
1809
1810 *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
1811 (0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;
1812
1813 *remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
1814 (0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;
1815
1816 return true;
1817}
1818#else
1819static bool dp_reo_remap_config(struct dp_soc *soc,
1820 uint32_t *remap1,
1821 uint32_t *remap2)
1822{
1823 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1824
1825 switch (offload_radio) {
1826 case 0:
1827 *remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1828 (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1829 (0x3 << 18) | (0x4 << 21)) << 8;
1830
1831 *remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
1832 (0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
1833 (0x3 << 18) | (0x4 << 21)) << 8;
1834 break;
1835
1836 case 1:
1837 *remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
1838 (0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
1839 (0x2 << 18) | (0x3 << 21)) << 8;
1840
1841 *remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
1842 (0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
1843 (0x4 << 18) | (0x2 << 21)) << 8;
1844 break;
1845
1846 case 2:
1847 *remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
1848 (0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
1849 (0x1 << 18) | (0x3 << 21)) << 8;
1850
1851 *remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
1852 (0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
1853 (0x4 << 18) | (0x1 << 21)) << 8;
1854 break;
1855
1856 case 3:
1857 /* return false if both radios are offloaded to NSS */
1858 return false;
1859 }
1860 return true;
1861}
1862#endif
1863
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05301864/*
Aniruddha Paul91dfd502018-01-08 11:24:34 +05301865 * dp_reo_frag_dst_set() - configure reo register to set the
1866 * fragment destination ring
1867 * @soc : Datapath soc
1868 * @frag_dst_ring : output parameter to set fragment destination ring
1869 *
1870 * Based on offload_radio below fragment destination rings is selected
1871 * 0 - TCL
1872 * 1 - SW1
1873 * 2 - SW2
1874 * 3 - SW3
1875 * 4 - SW4
1876 * 5 - Release
1877 * 6 - FW
1878 * 7 - alternate select
1879 *
1880 * return: void
1881 */
1882static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
1883{
1884 uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
1885
1886 switch (offload_radio) {
1887 case 0:
1888 *frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
1889 break;
1890 case 3:
1891 *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
1892 break;
1893 default:
1894 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1895 FL("dp_reo_frag_dst_set invalid offload radio config"));
1896 break;
1897 }
1898}
1899
1900/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001901 * dp_soc_cmn_setup() - Common SoC level initializion
1902 * @soc: Datapath SOC handle
1903 *
1904 * This is an internal function used to setup common SOC data structures,
1905 * to be called from PDEV attach after receiving HW mode capabilities from FW
1906 */
1907static int dp_soc_cmn_setup(struct dp_soc *soc)
1908{
1909 int i;
Dhanashri Atre14049172016-11-11 18:32:36 -08001910 struct hal_reo_params reo_params;
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05301911 int tx_ring_size;
1912 int tx_comp_ring_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001913
Ravi Joshi86e98262017-03-01 13:47:03 -08001914 if (qdf_atomic_read(&soc->cmn_init_done))
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001915 return 0;
1916
1917 if (dp_peer_find_attach(soc))
1918 goto fail0;
1919
1920 if (dp_hw_link_desc_pool_setup(soc))
1921 goto fail1;
1922
1923 /* Setup SRNG rings */
1924 /* Common rings */
1925 if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
1926 WBM_RELEASE_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301927 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1928 FL("dp_srng_setup failed for wbm_desc_rel_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001929 goto fail1;
1930 }
1931
1932
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301933 soc->num_tcl_data_rings = 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001934 /* Tx data rings */
1935 if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
1936 soc->num_tcl_data_rings =
1937 wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05301938 tx_comp_ring_size =
1939 wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
1940 tx_ring_size =
1941 wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001942 for (i = 0; i < soc->num_tcl_data_rings; i++) {
1943 if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05301944 TCL_DATA, i, 0, tx_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301945 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001946 QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301947 FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001948 goto fail1;
1949 }
Yun Parkfde6b9e2017-06-26 17:13:11 -07001950 /*
1951 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
1952 * count
1953 */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001954 if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05301955 WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301956 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001957 QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301958 FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001959 goto fail1;
1960 }
1961 }
1962 } else {
1963 /* This will be incremented during per pdev ring setup */
1964 soc->num_tcl_data_rings = 0;
1965 }
1966
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301967 if (dp_tx_soc_attach(soc)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301968 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1969 FL("dp_tx_soc_attach failed"));
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05301970 goto fail1;
1971 }
1972
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001973 /* TCL command and status rings */
1974 if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
1975 TCL_CMD_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301976 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1977 FL("dp_srng_setup failed for tcl_cmd_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001978 goto fail1;
1979 }
1980
1981 if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
1982 TCL_STATUS_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05301983 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1984 FL("dp_srng_setup failed for tcl_status_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001985 goto fail1;
1986 }
1987
1988
1989 /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
1990 * descriptors
1991 */
1992
1993 /* Rx data rings */
1994 if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
1995 soc->num_reo_dest_rings =
1996 wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
Dhanashri Atre14049172016-11-11 18:32:36 -08001997 QDF_TRACE(QDF_MODULE_ID_DP,
1998 QDF_TRACE_LEVEL_ERROR,
1999 FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002000 for (i = 0; i < soc->num_reo_dest_rings; i++) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002001 if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2002 i, 0, REO_DST_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302003 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002004 QDF_TRACE_LEVEL_ERROR,
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302005 FL("dp_srng_setup failed for reo_dest_ring[%d]"), i);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002006 goto fail1;
2007 }
2008 }
2009 } else {
2010 /* This will be incremented during per pdev ring setup */
2011 soc->num_reo_dest_rings = 0;
2012 }
2013
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002014 /* LMAC RxDMA to SW Rings configuration */
2015 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2016 /* Only valid for MCL */
2017 struct dp_pdev *pdev = soc->pdev_list[0];
2018
2019 for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2020 if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2021 RXDMA_DST, 0, i, RXDMA_ERR_DST_RING_SIZE)) {
2022 QDF_TRACE(QDF_MODULE_ID_DP,
2023 QDF_TRACE_LEVEL_ERROR,
2024 FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2025 goto fail1;
2026 }
2027 }
2028 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002029 /* TBD: call dp_rx_init to setup Rx SW descriptors */
2030
2031 /* REO reinjection ring */
2032 if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2033 REO_REINJECT_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302034 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2035 FL("dp_srng_setup failed for reo_reinject_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002036 goto fail1;
2037 }
2038
2039
2040 /* Rx release ring */
2041 if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2042 RX_RELEASE_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302043 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2044 FL("dp_srng_setup failed for rx_rel_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002045 goto fail1;
2046 }
2047
2048
2049 /* Rx exception ring */
2050 if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
2051 MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302052 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2053 FL("dp_srng_setup failed for reo_exception_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002054 goto fail1;
2055 }
2056
2057
2058 /* REO command and status rings */
2059 if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2060 REO_CMD_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302061 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2062 FL("dp_srng_setup failed for reo_cmd_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002063 goto fail1;
2064 }
2065
Manoj Ekbote4f0c6b12016-10-30 16:01:38 -07002066 hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2067 TAILQ_INIT(&soc->rx.reo_cmd_list);
2068 qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2069
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002070 if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2071 REO_STATUS_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302072 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2073 FL("dp_srng_setup failed for reo_status_ring"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002074 goto fail1;
2075 }
2076
Yun Park92af7132017-09-13 16:33:35 -07002077 qdf_spinlock_create(&soc->ast_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302078 dp_soc_wds_attach(soc);
2079
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302080 /* Reset the cpu ring map if radio is NSS offloaded */
2081 if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2082 dp_soc_reset_cpu_ring_map(soc);
Aniruddha Paule3a03342017-09-19 16:42:10 +05302083 dp_soc_reset_intr_mask(soc);
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302084 }
2085
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002086 /* Setup HW REO */
Dhanashri Atre14049172016-11-11 18:32:36 -08002087 qdf_mem_zero(&reo_params, sizeof(reo_params));
2088
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302089 if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
Dhanashri Atre14049172016-11-11 18:32:36 -08002090
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302091 /*
2092 * Reo ring remap is not required if both radios
2093 * are offloaded to NSS
2094 */
2095 if (!dp_reo_remap_config(soc,
2096 &reo_params.remap1,
2097 &reo_params.remap2))
2098 goto out;
2099
2100 reo_params.rx_hash_enabled = true;
2101 }
2102
psimhafc2f91b2018-01-10 15:30:03 -08002103 /* setup the global rx defrag waitlist */
2104 TAILQ_INIT(&soc->rx.defrag.waitlist);
2105 soc->rx.defrag.timeout_ms =
2106 wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
2107 soc->rx.flags.defrag_timeout_check =
2108 wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
2109
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05302110out:
Aniruddha Paul91dfd502018-01-08 11:24:34 +05302111 /*
2112 * set the fragment destination ring
2113 */
2114 dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
2115
Dhanashri Atre14049172016-11-11 18:32:36 -08002116 hal_reo_setup(soc->hal_soc, &reo_params);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002117
Ravi Joshi86e98262017-03-01 13:47:03 -08002118 qdf_atomic_set(&soc->cmn_init_done, 1);
Om Prakash Tripathi12126822017-08-03 10:21:24 +05302119 qdf_nbuf_queue_init(&soc->htt_stats.msg);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002120 return 0;
2121fail1:
2122 /*
2123 * Cleanup will be done as part of soc_detach, which will
2124 * be called on pdev attach failure
2125 */
2126fail0:
2127 return QDF_STATUS_E_FAILURE;
2128}
2129
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002130static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002131
Dhanashri Atre14049172016-11-11 18:32:36 -08002132static void dp_lro_hash_setup(struct dp_soc *soc)
2133{
2134 struct cdp_lro_hash_config lro_hash;
2135
2136 if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
2137 !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2138 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2139 FL("LRO disabled RX hash disabled"));
2140 return;
2141 }
2142
2143 qdf_mem_zero(&lro_hash, sizeof(lro_hash));
2144
2145 if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
2146 lro_hash.lro_enable = 1;
2147 lro_hash.tcp_flag = QDF_TCPHDR_ACK;
2148 lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
Houston Hoffman41b912c2017-08-30 14:27:51 -07002149 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
2150 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
Dhanashri Atre14049172016-11-11 18:32:36 -08002151 }
2152
Houston Hoffman41b912c2017-08-30 14:27:51 -07002153 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
2154 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
Dhanashri Atre14049172016-11-11 18:32:36 -08002155 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2156 LRO_IPV4_SEED_ARR_SZ));
Dhanashri Atre14049172016-11-11 18:32:36 -08002157 qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
2158 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2159 LRO_IPV6_SEED_ARR_SZ));
2160
Houston Hoffman41b912c2017-08-30 14:27:51 -07002161 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
2162 "lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
Dhanashri Atre14049172016-11-11 18:32:36 -08002163 lro_hash.lro_enable, lro_hash.tcp_flag,
2164 lro_hash.tcp_flag_mask);
2165
Dhanashri Atre14049172016-11-11 18:32:36 -08002166 qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2167 QDF_TRACE_LEVEL_ERROR,
2168 (void *)lro_hash.toeplitz_hash_ipv4,
2169 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
2170 LRO_IPV4_SEED_ARR_SZ));
2171
Dhanashri Atre14049172016-11-11 18:32:36 -08002172 qdf_trace_hex_dump(QDF_MODULE_ID_DP,
2173 QDF_TRACE_LEVEL_ERROR,
2174 (void *)lro_hash.toeplitz_hash_ipv6,
2175 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
2176 LRO_IPV6_SEED_ARR_SZ));
2177
2178 qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
2179
2180 if (soc->cdp_soc.ol_ops->lro_hash_config)
2181 (void)soc->cdp_soc.ol_ops->lro_hash_config
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05302182 (soc->ctrl_psoc, &lro_hash);
Dhanashri Atre14049172016-11-11 18:32:36 -08002183}
2184
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002185/*
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002186* dp_rxdma_ring_setup() - configure the RX DMA rings
2187* @soc: data path SoC handle
2188* @pdev: Physical device handle
2189*
2190* Return: 0 - success, > 0 - failure
2191*/
2192#ifdef QCA_HOST2FW_RXBUF_RING
2193static int dp_rxdma_ring_setup(struct dp_soc *soc,
2194 struct dp_pdev *pdev)
2195{
2196 int max_mac_rings =
2197 wlan_cfg_get_num_mac_rings
2198 (pdev->wlan_cfg_ctx);
2199 int i;
2200
2201 for (i = 0; i < max_mac_rings; i++) {
2202 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2203 "%s: pdev_id %d mac_id %d\n",
2204 __func__, pdev->pdev_id, i);
2205 if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
2206 RXDMA_BUF, 1, i, RXDMA_BUF_RING_SIZE)) {
2207 QDF_TRACE(QDF_MODULE_ID_DP,
2208 QDF_TRACE_LEVEL_ERROR,
2209 FL("failed rx mac ring setup"));
2210 return QDF_STATUS_E_FAILURE;
2211 }
2212 }
2213 return QDF_STATUS_SUCCESS;
2214}
2215#else
2216static int dp_rxdma_ring_setup(struct dp_soc *soc,
2217 struct dp_pdev *pdev)
2218{
2219 return QDF_STATUS_SUCCESS;
2220}
2221#endif
Ishank Jain949674c2017-02-27 17:09:29 +05302222
2223/**
2224 * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
2225 * @pdev - DP_PDEV handle
2226 *
2227 * Return: void
2228 */
2229static inline void
2230dp_dscp_tid_map_setup(struct dp_pdev *pdev)
2231{
2232 uint8_t map_id;
2233 for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
2234 qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
2235 sizeof(default_dscp_tid_map));
2236 }
2237 for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
2238 hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
2239 pdev->dscp_tid_map[map_id],
2240 map_id);
2241 }
2242}
2243
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05302244#ifdef QCA_SUPPORT_SON
2245/**
2246 * dp_mark_peer_inact(): Update peer inactivity status
2247 * @peer_handle - datapath peer handle
2248 *
2249 * Return: void
2250 */
2251void dp_mark_peer_inact(void *peer_handle, bool inactive)
2252{
2253 struct dp_peer *peer = (struct dp_peer *)peer_handle;
2254 struct dp_pdev *pdev;
2255 struct dp_soc *soc;
2256 bool inactive_old;
2257
2258 if (!peer)
2259 return;
2260
2261 pdev = peer->vdev->pdev;
2262 soc = pdev->soc;
2263
2264 inactive_old = peer->peer_bs_inact_flag == 1;
2265 if (!inactive)
2266 peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2267 peer->peer_bs_inact_flag = inactive ? 1 : 0;
2268
2269 if (inactive_old != inactive) {
2270 struct ieee80211com *ic;
2271 struct ol_ath_softc_net80211 *scn;
2272
2273 scn = (struct ol_ath_softc_net80211 *)pdev->osif_pdev;
2274 ic = &scn->sc_ic;
2275 /**
2276 * Note: a node lookup can happen in RX datapath context
2277 * when a node changes from inactive to active (at most once
2278 * per inactivity timeout threshold)
2279 */
2280 if (soc->cdp_soc.ol_ops->record_act_change) {
2281 soc->cdp_soc.ol_ops->record_act_change(ic->ic_pdev_obj,
2282 peer->mac_addr.raw, !inactive);
2283 }
2284 }
2285}
2286
2287/**
2288 * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
2289 *
2290 * Periodically checks the inactivity status
2291 */
2292static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
2293{
2294 struct dp_pdev *pdev;
2295 struct dp_vdev *vdev;
2296 struct dp_peer *peer;
2297 struct dp_soc *soc;
2298 int i;
2299
2300 OS_GET_TIMER_ARG(soc, struct dp_soc *);
2301
2302 qdf_spin_lock(&soc->peer_ref_mutex);
2303
2304 for (i = 0; i < soc->pdev_count; i++) {
2305 pdev = soc->pdev_list[i];
2306 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2307 if (vdev->opmode != wlan_op_mode_ap)
2308 continue;
2309
2310 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2311 if (!peer->authorize) {
2312 /**
2313 * Inactivity check only interested in
2314 * connected node
2315 */
2316 continue;
2317 }
2318 if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
2319 /**
2320 * This check ensures we do not wait extra long
2321 * due to the potential race condition
2322 */
2323 peer->peer_bs_inact = soc->pdev_bs_inact_reload;
2324 }
2325 if (peer->peer_bs_inact > 0) {
2326 /* Do not let it wrap around */
2327 peer->peer_bs_inact--;
2328 }
2329 if (peer->peer_bs_inact == 0)
2330 dp_mark_peer_inact(peer, true);
2331 }
2332 }
2333 }
2334
2335 qdf_spin_unlock(&soc->peer_ref_mutex);
2336 qdf_timer_mod(&soc->pdev_bs_inact_timer,
2337 soc->pdev_bs_inact_interval * 1000);
2338}
Subhranil Choudhuryeea67382018-01-18 20:24:36 +05302339
Stephan Raj Ignatious Durairaje5dd51c2018-02-09 16:39:12 +05302340
2341/**
2342 * dp_free_inact_timer(): free inact timer
2343 * @timer - inact timer handle
2344 *
2345 * Return: bool
2346 */
2347void dp_free_inact_timer(struct dp_soc *soc)
2348{
2349 qdf_timer_free(&soc->pdev_bs_inact_timer);
2350}
Subhranil Choudhuryeea67382018-01-18 20:24:36 +05302351#else
2352
2353void dp_mark_peer_inact(void *peer, bool inactive)
2354{
2355 return;
2356}
Stephan Raj Ignatious Durairaje5dd51c2018-02-09 16:39:12 +05302357
2358void dp_free_inact_timer(struct dp_soc *soc)
2359{
2360 return;
2361}
2362
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05302363#endif
2364
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002365/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002366* dp_pdev_attach_wifi3() - attach txrx pdev
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05302367* @ctrl_pdev: Opaque PDEV object
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002368* @txrx_soc: Datapath SOC handle
2369* @htc_handle: HTC handle for host-target interface
2370* @qdf_osdev: QDF OS device
2371* @pdev_id: PDEV ID
2372*
2373* Return: DP PDEV handle on success, NULL on failure
2374*/
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002375static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
2376 struct cdp_cfg *ctrl_pdev,
Leo Chang5ea93a42016-11-03 12:39:49 -07002377 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002378{
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302379 int tx_ring_size;
2380 int tx_comp_ring_size;
2381
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002382 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2383 struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
2384
2385 if (!pdev) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302386 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2387 FL("DP PDEV memory allocation failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002388 goto fail0;
2389 }
2390
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05302391 pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach();
2392
2393 if (!pdev->wlan_cfg_ctx) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302394 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2395 FL("pdev cfg_attach failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302396
2397 qdf_mem_free(pdev);
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05302398 goto fail0;
2399 }
2400
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302401 /*
2402 * set nss pdev config based on soc config
2403 */
2404 wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302405 (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx) & (1 << pdev_id)));
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302406
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002407 pdev->soc = soc;
2408 pdev->osif_pdev = ctrl_pdev;
2409 pdev->pdev_id = pdev_id;
2410 soc->pdev_list[pdev_id] = pdev;
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08002411 soc->pdev_count++;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002412
2413 TAILQ_INIT(&pdev->vdev_list);
2414 pdev->vdev_count = 0;
2415
Pamidipati, Vijay9c9a2872017-05-31 10:06:34 +05302416 qdf_spinlock_create(&pdev->tx_mutex);
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05302417 qdf_spinlock_create(&pdev->neighbour_peer_mutex);
2418 TAILQ_INIT(&pdev->neighbour_peers_list);
2419
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002420 if (dp_soc_cmn_setup(soc)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302421 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2422 FL("dp_soc_cmn_setup failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302423 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002424 }
2425
2426 /* Setup per PDEV TCL rings if configured */
2427 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302428 tx_ring_size =
2429 wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
2430 tx_comp_ring_size =
2431 wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
2432
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002433 if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302434 pdev_id, pdev_id, tx_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302435 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2436 FL("dp_srng_setup failed for tcl_data_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302437 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002438 }
2439 if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
Pamidipati, Vijaybea353e2017-07-05 03:09:20 +05302440 WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302441 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2442 FL("dp_srng_setup failed for tx_comp_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302443 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002444 }
2445 soc->num_tcl_data_rings++;
2446 }
2447
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302448 /* Tx specific init */
2449 if (dp_tx_pdev_attach(pdev)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302450 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2451 FL("dp_tx_pdev_attach failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302452 goto fail1;
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05302453 }
2454
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002455 /* Setup per PDEV REO rings if configured */
2456 if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2457 if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
2458 pdev_id, pdev_id, REO_DST_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302459 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2460 FL("dp_srng_setup failed for reo_dest_ringn"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302461 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002462 }
2463 soc->num_reo_dest_rings++;
2464
2465 }
Dhanashri Atre7351d172016-10-12 13:08:09 -07002466 if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002467 RXDMA_REFILL_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302468 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2469 FL("dp_srng_setup failed rx refill ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302470 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002471 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002472
2473 if (dp_rxdma_ring_setup(soc, pdev)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302474 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002475 FL("RXDMA ring config failed"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302476 goto fail1;
Dhanashri Atre7351d172016-10-12 13:08:09 -07002477 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002478
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002479 if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0,
2480 pdev_id, RXDMA_MONITOR_BUF_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302481 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2482 FL("dp_srng_setup failed for rxdma_mon_buf_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302483 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002484 }
2485
2486 if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0,
2487 pdev_id, RXDMA_MONITOR_DST_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302488 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2489 FL("dp_srng_setup failed for rxdma_mon_dst_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302490 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002491 }
2492
2493
2494 if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring,
2495 RXDMA_MONITOR_STATUS, 0, pdev_id,
2496 RXDMA_MONITOR_STATUS_RING_SIZE)) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05302497 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2498 FL("dp_srng_setup failed for rxdma_mon_status_ring"));
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302499 goto fail1;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002500 }
2501
Kai Chen6eca1a62017-01-12 10:17:53 -08002502 if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring,
2503 RXDMA_MONITOR_DESC, 0, pdev_id, RXDMA_MONITOR_DESC_RING_SIZE)) {
Yun Park601d0d82017-08-28 21:49:31 -07002504 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Kai Chen6eca1a62017-01-12 10:17:53 -08002505 "dp_srng_setup failed for rxdma_mon_desc_ring\n");
2506 goto fail1;
2507 }
2508
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002509 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2510 if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
2511 0, pdev_id, RXDMA_ERR_DST_RING_SIZE)) {
2512 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2513 FL("dp_srng_setup failed for rxdma_err_dst_ring"));
2514 goto fail1;
2515 }
Pramod Simhae382ff82017-06-05 18:09:26 -07002516 }
2517
Yun Park601d0d82017-08-28 21:49:31 -07002518 /* Setup second Rx refill buffer ring */
2519 if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 2,
2520 pdev->pdev_id, RXDMA_REFILL_RING_SIZE)) {
2521 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2522 FL("dp_srng_setup failed second rx refill ring"));
2523 goto fail1;
2524 }
2525
Yun Parkfde6b9e2017-06-26 17:13:11 -07002526 if (dp_ipa_ring_resource_setup(soc, pdev))
2527 goto fail1;
2528
2529 if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
Yun Park601d0d82017-08-28 21:49:31 -07002530 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2531 FL("dp_ipa_uc_attach failed"));
Yun Parkfde6b9e2017-06-26 17:13:11 -07002532 goto fail1;
2533 }
2534
Leo Chang5ea93a42016-11-03 12:39:49 -07002535 /* Rx specific init */
2536 if (dp_rx_pdev_attach(pdev)) {
Yun Parkfde6b9e2017-06-26 17:13:11 -07002537 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Yun Park601d0d82017-08-28 21:49:31 -07002538 FL("dp_rx_pdev_attach failed"));
Yun Parkfde6b9e2017-06-26 17:13:11 -07002539 goto fail0;
Leo Chang5ea93a42016-11-03 12:39:49 -07002540 }
Ishank Jainbc2d91f2017-01-03 18:14:54 +05302541 DP_STATS_INIT(pdev);
Leo Chang5ea93a42016-11-03 12:39:49 -07002542
nobeljd124b742017-10-16 11:59:12 -07002543 /* Monitor filter init */
2544 pdev->mon_filter_mode = MON_FILTER_ALL;
2545 pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
2546 pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
2547 pdev->fp_data_filter = FILTER_DATA_ALL;
2548 pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
2549 pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
2550 pdev->mo_data_filter = FILTER_DATA_ALL;
2551
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05302552#ifndef CONFIG_WIN
Leo Chang5ea93a42016-11-03 12:39:49 -07002553 /* MCL */
2554 dp_local_peer_id_pool_init(pdev);
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05302555#endif
Ishank Jain949674c2017-02-27 17:09:29 +05302556 dp_dscp_tid_map_setup(pdev);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002557
Kai Chen6eca1a62017-01-12 10:17:53 -08002558 /* Rx monitor mode specific init */
2559 if (dp_rx_pdev_mon_attach(pdev)) {
2560 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Keyur Parekhfad6d082017-05-07 08:54:47 -07002561 "dp_rx_pdev_attach failed\n");
2562 goto fail1;
2563 }
2564
2565 if (dp_wdi_event_attach(pdev)) {
2566 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2567 "dp_wdi_evet_attach failed\n");
2568 goto fail1;
Kai Chen6eca1a62017-01-12 10:17:53 -08002569 }
2570
Om Prakash Tripathia7fb93f2017-06-27 18:41:41 +05302571 /* set the reo destination during initialization */
2572 pdev->reo_dest = pdev->pdev_id + 1;
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05302573
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002574 return (struct cdp_pdev *)pdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002575
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302576fail1:
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002577 dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
Vijay Pamidipatib775e132016-10-19 21:19:52 +05302578
2579fail0:
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002580 return NULL;
2581}
2582
2583/*
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002584* dp_rxdma_ring_cleanup() - configure the RX DMA rings
2585* @soc: data path SoC handle
2586* @pdev: Physical device handle
2587*
2588* Return: void
2589*/
2590#ifdef QCA_HOST2FW_RXBUF_RING
2591static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2592 struct dp_pdev *pdev)
2593{
2594 int max_mac_rings =
2595 wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2596 int i;
2597
2598 max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
2599 max_mac_rings : MAX_RX_MAC_RINGS;
2600 for (i = 0; i < MAX_RX_MAC_RINGS; i++)
2601 dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
2602 RXDMA_BUF, 1);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07002603
2604 qdf_timer_free(&soc->mon_reap_timer);
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002605}
2606#else
2607static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
2608 struct dp_pdev *pdev)
2609{
2610}
2611#endif
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05302612
2613/*
2614 * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
2615 * @pdev: device object
2616 *
2617 * Return: void
2618 */
2619static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
2620{
2621 struct dp_neighbour_peer *peer = NULL;
2622 struct dp_neighbour_peer *temp_peer = NULL;
2623
2624 TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
2625 neighbour_peer_list_elem, temp_peer) {
2626 /* delete this peer from the list */
2627 TAILQ_REMOVE(&pdev->neighbour_peers_list,
2628 peer, neighbour_peer_list_elem);
2629 qdf_mem_free(peer);
2630 }
2631
2632 qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
2633}
2634
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002635/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002636* dp_pdev_detach_wifi3() - detach txrx pdev
2637* @txrx_pdev: Datapath PDEV handle
2638* @force: Force detach
2639*
2640*/
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08002641static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002642{
2643 struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
2644 struct dp_soc *soc = pdev->soc;
Tallapragada Kalyan94034632017-12-07 17:29:13 +05302645 qdf_nbuf_t curr_nbuf, next_nbuf;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002646
Keyur Parekhfad6d082017-05-07 08:54:47 -07002647 dp_wdi_event_detach(pdev);
2648
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05302649 dp_tx_pdev_detach(pdev);
2650
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002651 if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2652 dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
2653 TCL_DATA, pdev->pdev_id);
2654 dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
2655 WBM2SW_RELEASE, pdev->pdev_id);
2656 }
2657
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07002658 dp_pktlogmod_exit(pdev);
2659
Leo Chang5ea93a42016-11-03 12:39:49 -07002660 dp_rx_pdev_detach(pdev);
2661
Kai Chen6eca1a62017-01-12 10:17:53 -08002662 dp_rx_pdev_mon_detach(pdev);
2663
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05302664 dp_neighbour_peers_detach(pdev);
Pamidipati, Vijay9c9a2872017-05-31 10:06:34 +05302665 qdf_spinlock_destroy(&pdev->tx_mutex);
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05302666
Yun Parkfde6b9e2017-06-26 17:13:11 -07002667 dp_ipa_uc_detach(soc, pdev);
2668
Yun Park601d0d82017-08-28 21:49:31 -07002669 dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 2);
2670
Yun Parkfde6b9e2017-06-26 17:13:11 -07002671 /* Cleanup per PDEV REO rings if configured */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002672 if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2673 dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
2674 REO_DST, pdev->pdev_id);
2675 }
2676
Dhanashri Atre7351d172016-10-12 13:08:09 -07002677 dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002678
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002679 dp_rxdma_ring_cleanup(soc, pdev);
2680
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002681 dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0);
2682
2683 dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0);
2684
2685 dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring,
2686 RXDMA_MONITOR_STATUS, 0);
2687
Kai Chen6eca1a62017-01-12 10:17:53 -08002688 dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring,
2689 RXDMA_MONITOR_DESC, 0);
2690
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002691 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
2692 dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST, 0);
2693 } else {
2694 int i;
2695
2696 for (i = 0; i < MAX_RX_MAC_RINGS; i++)
2697 dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[i],
2698 RXDMA_DST, 0);
2699 }
Pramod Simhae382ff82017-06-05 18:09:26 -07002700
Tallapragada Kalyan94034632017-12-07 17:29:13 +05302701 curr_nbuf = pdev->invalid_peer_head_msdu;
2702 while (curr_nbuf) {
2703 next_nbuf = qdf_nbuf_next(curr_nbuf);
2704 qdf_nbuf_free(curr_nbuf);
2705 curr_nbuf = next_nbuf;
2706 }
2707
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002708 soc->pdev_list[pdev->pdev_id] = NULL;
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08002709 soc->pdev_count--;
Manikandan Mohanb01696b2017-05-09 18:03:19 -07002710 wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
Santosh Anbu2280e862018-01-03 22:25:53 +05302711 qdf_mem_free(pdev->dp_txrx_handle);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002712 qdf_mem_free(pdev);
2713}
2714
2715/*
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08002716 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
2717 * @soc: DP SOC handle
2718 */
2719static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
2720{
2721 struct reo_desc_list_node *desc;
2722 struct dp_rx_tid *rx_tid;
2723
2724 qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
2725 while (qdf_list_remove_front(&soc->reo_desc_freelist,
2726 (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
2727 rx_tid = &desc->rx_tid;
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08002728 qdf_mem_unmap_nbytes_single(soc->osdev,
Pramod Simha6b23f752017-03-30 11:54:18 -07002729 rx_tid->hw_qdesc_paddr,
Karunakar Dasineni7957fa92017-02-23 23:05:40 -08002730 QDF_DMA_BIDIRECTIONAL,
2731 rx_tid->hw_qdesc_alloc_size);
2732 qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08002733 qdf_mem_free(desc);
2734 }
2735 qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
2736 qdf_list_destroy(&soc->reo_desc_freelist);
2737 qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
2738}
2739
2740/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002741 * dp_soc_detach_wifi3() - Detach txrx SOC
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07002742 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002743 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08002744static void dp_soc_detach_wifi3(void *txrx_soc)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002745{
2746 struct dp_soc *soc = (struct dp_soc *)txrx_soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002747 int i;
2748
Ravi Joshi86e98262017-03-01 13:47:03 -08002749 qdf_atomic_set(&soc->cmn_init_done, 0);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002750
Dustin Brownf653d162017-09-19 11:29:41 -07002751 qdf_flush_work(&soc->htt_stats.work);
2752 qdf_disable_work(&soc->htt_stats.work);
Om Prakash Tripathi12126822017-08-03 10:21:24 +05302753
2754 /* Free pending htt stats messages */
2755 qdf_nbuf_queue_free(&soc->htt_stats.msg);
Om Prakash Tripathi2cd7fab2017-07-07 20:27:25 +05302756
Stephan Raj Ignatious Durairaje5dd51c2018-02-09 16:39:12 +05302757 dp_free_inact_timer(soc);
2758
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002759 for (i = 0; i < MAX_PDEV_CNT; i++) {
2760 if (soc->pdev_list[i])
Kiran Venkatappa5dba3a32017-03-01 16:00:22 +05302761 dp_pdev_detach_wifi3(
2762 (struct cdp_pdev *)soc->pdev_list[i], 1);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002763 }
2764
2765 dp_peer_find_detach(soc);
2766
2767 /* TBD: Call Tx and Rx cleanup functions to free buffers and
2768 * SW descriptors
2769 */
2770
2771 /* Free the ring memories */
2772 /* Common rings */
2773 dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
2774
Manikandan Mohanb01696b2017-05-09 18:03:19 -07002775 dp_tx_soc_detach(soc);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002776 /* Tx data rings */
2777 if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
2778 for (i = 0; i < soc->num_tcl_data_rings; i++) {
2779 dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
2780 TCL_DATA, i);
2781 dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
2782 WBM2SW_RELEASE, i);
2783 }
2784 }
2785
2786 /* TCL command and status rings */
2787 dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
2788 dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
2789
2790 /* Rx data rings */
2791 if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
2792 soc->num_reo_dest_rings =
2793 wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2794 for (i = 0; i < soc->num_reo_dest_rings; i++) {
2795 /* TODO: Get number of rings and ring sizes
2796 * from wlan_cfg
2797 */
2798 dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
2799 REO_DST, i);
2800 }
2801 }
2802 /* REO reinjection ring */
2803 dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
2804
2805 /* Rx release ring */
2806 dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
2807
2808 /* Rx exception ring */
2809 /* TODO: Better to store ring_type and ring_num in
2810 * dp_srng during setup
2811 */
2812 dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
2813
2814 /* REO command and status rings */
2815 dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
2816 dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
Manoj Ekbote525bcab2017-09-01 17:23:32 -07002817 dp_hw_link_desc_pool_cleanup(soc);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08002818
Leo Chang5ea93a42016-11-03 12:39:49 -07002819 qdf_spinlock_destroy(&soc->peer_ref_mutex);
Om Prakash Tripathi12126822017-08-03 10:21:24 +05302820 qdf_spinlock_destroy(&soc->htt_stats.lock);
2821
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002822 htt_soc_detach(soc->htt_handle);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08002823
Manoj Ekbote2a372d22017-06-29 14:54:57 -07002824 dp_reo_cmdlist_destroy(soc);
2825 qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
Karunakar Dasineni8bebb002017-02-09 22:15:23 -08002826 dp_reo_desc_freelist_destroy(soc);
Manoj Ekbote2a372d22017-06-29 14:54:57 -07002827
Manikandan Mohanb01696b2017-05-09 18:03:19 -07002828 wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302829
2830 dp_soc_wds_detach(soc);
Yun Park92af7132017-09-13 16:33:35 -07002831 qdf_spinlock_destroy(&soc->ast_lock);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05302832
Venkata Sharath Chandra Manchala65bf2302017-03-09 17:28:56 -08002833 qdf_mem_free(soc);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002834}
2835
2836/*
Yun Parkfde6b9e2017-06-26 17:13:11 -07002837 * dp_rxdma_ring_config() - configure the RX DMA rings
2838 *
2839 * This function is used to configure the MAC rings.
2840 * On MCL host provides buffers in Host2FW ring
2841 * FW refills (copies) buffers to the ring and updates
2842 * ring_idx in register
2843 *
2844 * @soc: data path SoC handle
2845 *
2846 * Return: void
2847 */
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002848#ifdef QCA_HOST2FW_RXBUF_RING
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002849static void dp_rxdma_ring_config(struct dp_soc *soc)
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002850{
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002851 int i;
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002852
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002853 for (i = 0; i < MAX_PDEV_CNT; i++) {
2854 struct dp_pdev *pdev = soc->pdev_list[i];
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002855
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002856 if (pdev) {
2857 int mac_id = 0;
2858 int j;
Dhanashri Atre398935e2017-03-31 15:34:28 -07002859 bool dbs_enable = 0;
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002860 int max_mac_rings =
2861 wlan_cfg_get_num_mac_rings
2862 (pdev->wlan_cfg_ctx);
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002863
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002864 htt_srng_setup(soc->htt_handle, 0,
2865 pdev->rx_refill_buf_ring.hal_srng,
2866 RXDMA_BUF);
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002867
Yun Park601d0d82017-08-28 21:49:31 -07002868 if (pdev->rx_refill_buf_ring2.hal_srng)
2869 htt_srng_setup(soc->htt_handle, 0,
2870 pdev->rx_refill_buf_ring2.hal_srng,
2871 RXDMA_BUF);
Yun Parkfde6b9e2017-06-26 17:13:11 -07002872
Dhanashri Atre2c6381d2017-03-30 19:33:52 -07002873 if (soc->cdp_soc.ol_ops->
2874 is_hw_dbs_2x2_capable) {
Dhanashri Atre398935e2017-03-31 15:34:28 -07002875 dbs_enable = soc->cdp_soc.ol_ops->
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05302876 is_hw_dbs_2x2_capable(soc->ctrl_psoc);
Dhanashri Atre398935e2017-03-31 15:34:28 -07002877 }
2878
2879 if (dbs_enable) {
2880 QDF_TRACE(QDF_MODULE_ID_TXRX,
2881 QDF_TRACE_LEVEL_ERROR,
2882 FL("DBS enabled max_mac_rings %d\n"),
2883 max_mac_rings);
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002884 } else {
Dhanashri Atre398935e2017-03-31 15:34:28 -07002885 max_mac_rings = 1;
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002886 QDF_TRACE(QDF_MODULE_ID_TXRX,
2887 QDF_TRACE_LEVEL_ERROR,
Dhanashri Atre398935e2017-03-31 15:34:28 -07002888 FL("DBS disabled, max_mac_rings %d\n"),
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002889 max_mac_rings);
2890 }
2891
2892 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2893 FL("pdev_id %d max_mac_rings %d\n"),
2894 pdev->pdev_id, max_mac_rings);
2895
2896 for (j = 0; j < max_mac_rings; j++) {
2897 QDF_TRACE(QDF_MODULE_ID_TXRX,
2898 QDF_TRACE_LEVEL_ERROR,
2899 FL("mac_id %d\n"), mac_id);
2900 htt_srng_setup(soc->htt_handle, mac_id,
2901 pdev->rx_mac_buf_ring[j]
2902 .hal_srng,
2903 RXDMA_BUF);
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002904 htt_srng_setup(soc->htt_handle, mac_id,
2905 pdev->rxdma_err_dst_ring[j]
2906 .hal_srng,
2907 RXDMA_DST);
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002908 mac_id++;
2909 }
Ravi Joshi8851f4e2017-06-07 21:22:08 -07002910
2911 /* Configure monitor mode rings */
2912 htt_srng_setup(soc->htt_handle, i,
2913 pdev->rxdma_mon_buf_ring.hal_srng,
2914 RXDMA_MONITOR_BUF);
2915
2916 htt_srng_setup(soc->htt_handle, i,
2917 pdev->rxdma_mon_dst_ring.hal_srng,
2918 RXDMA_MONITOR_DST);
2919
2920 htt_srng_setup(soc->htt_handle, i,
2921 pdev->rxdma_mon_status_ring.hal_srng,
2922 RXDMA_MONITOR_STATUS);
2923
2924 htt_srng_setup(soc->htt_handle, i,
2925 pdev->rxdma_mon_desc_ring.hal_srng,
2926 RXDMA_MONITOR_DESC);
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002927 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002928 }
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07002929
2930 /*
2931 * Timer to reap rxdma status rings.
2932 * Needed until we enable ppdu end interrupts
2933 */
2934 qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
2935 dp_service_mon_rings, (void *)soc,
2936 QDF_TIMER_TYPE_WAKE_APPS);
2937 soc->reap_timer_init = 1;
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002938}
2939#else
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002940static void dp_rxdma_ring_config(struct dp_soc *soc)
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002941{
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002942 int i;
2943
2944 for (i = 0; i < MAX_PDEV_CNT; i++) {
2945 struct dp_pdev *pdev = soc->pdev_list[i];
2946
2947 if (pdev) {
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002948 int ring_idx = dp_get_ring_id_for_mac_id(soc, i);
2949
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002950 htt_srng_setup(soc->htt_handle, i,
2951 pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
Kai Chen6eca1a62017-01-12 10:17:53 -08002952
2953 htt_srng_setup(soc->htt_handle, i,
2954 pdev->rxdma_mon_buf_ring.hal_srng,
2955 RXDMA_MONITOR_BUF);
2956 htt_srng_setup(soc->htt_handle, i,
2957 pdev->rxdma_mon_dst_ring.hal_srng,
2958 RXDMA_MONITOR_DST);
2959 htt_srng_setup(soc->htt_handle, i,
2960 pdev->rxdma_mon_status_ring.hal_srng,
2961 RXDMA_MONITOR_STATUS);
2962 htt_srng_setup(soc->htt_handle, i,
2963 pdev->rxdma_mon_desc_ring.hal_srng,
2964 RXDMA_MONITOR_DESC);
Pramod Simhae382ff82017-06-05 18:09:26 -07002965 htt_srng_setup(soc->htt_handle, i,
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08002966 pdev->rxdma_err_dst_ring[ring_idx].hal_srng,
2967 RXDMA_DST);
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002968 }
2969 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002970}
2971#endif
2972
2973/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002974 * dp_soc_attach_target_wifi3() - SOC initialization in the target
2975 * @txrx_soc: Datapath SOC handle
2976 */
Jeff Johnsonf1352572017-01-10 14:24:10 -08002977static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002978{
Leo Chang5ea93a42016-11-03 12:39:49 -07002979 struct dp_soc *soc = (struct dp_soc *)cdp_soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002980
2981 htt_soc_attach_target(soc->htt_handle);
2982
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -08002983 dp_rxdma_ring_config(soc);
Dhanashri Atred4032ab2017-01-17 15:05:41 -08002984
Ishank Jainbc2d91f2017-01-03 18:14:54 +05302985 DP_STATS_INIT(soc);
Om Prakash Tripathi2cd7fab2017-07-07 20:27:25 +05302986
2987 /* initialize work queue for stats processing */
Om Prakash Tripathi12126822017-08-03 10:21:24 +05302988 qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
Om Prakash Tripathi2cd7fab2017-07-07 20:27:25 +05302989
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07002990 return 0;
2991}
2992
2993/*
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05302994 * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
2995 * @txrx_soc: Datapath SOC handle
2996 */
2997static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
2998{
2999 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
3000 return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
3001}
3002/*
3003 * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
3004 * @txrx_soc: Datapath SOC handle
3005 * @nss_cfg: nss config
3006 */
3007static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
3008{
3009 struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
Aniruddha Paul9d7dc272018-02-11 19:40:41 +05303010 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
3011
3012 wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
3013
3014 /*
3015 * TODO: masked out based on the per offloaded radio
3016 */
3017 if (config == dp_nss_cfg_dbdc) {
3018 wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
3019 wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
3020 wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
3021 wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
3022 }
3023
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05303024 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3025 FL("nss-wifi<0> nss config is enabled"));
3026}
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05303027/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003028* dp_vdev_attach_wifi3() - attach txrx vdev
3029* @txrx_pdev: Datapath PDEV handle
3030* @vdev_mac_addr: MAC address of the virtual interface
3031* @vdev_id: VDEV Id
3032* @wlan_op_mode: VDEV operating mode
3033*
3034* Return: DP VDEV handle on success, NULL on failure
3035*/
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003036static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003037 uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
3038{
3039 struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3040 struct dp_soc *soc = pdev->soc;
3041 struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07003042 int tx_ring_size;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003043
3044 if (!vdev) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303045 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3046 FL("DP VDEV memory allocation failed"));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003047 goto fail0;
3048 }
3049
3050 vdev->pdev = pdev;
3051 vdev->vdev_id = vdev_id;
3052 vdev->opmode = op_mode;
Vijay Pamidipatid41d6d62016-10-19 21:19:00 +05303053 vdev->osdev = soc->osdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003054
3055 vdev->osif_rx = NULL;
Venkateswara Swamy Bandarubfbef4f2016-12-16 19:12:31 +05303056 vdev->osif_rsim_rx_decap = NULL;
Venkateswara Swamy Bandaru3f4e1c42017-07-10 19:47:09 +05303057 vdev->osif_get_key = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003058 vdev->osif_rx_mon = NULL;
Venkateswara Swamy Bandaru97482342017-02-16 12:04:50 +05303059 vdev->osif_tx_free_ext = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003060 vdev->osif_vdev = NULL;
3061
3062 vdev->delete.pending = 0;
3063 vdev->safemode = 0;
3064 vdev->drop_unenc = 1;
ruchi agrawal45f3ac42017-10-25 09:03:28 +05303065 vdev->sec_type = cdp_sec_type_none;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003066#ifdef notyet
3067 vdev->filters_num = 0;
3068#endif
3069
3070 qdf_mem_copy(
3071 &vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3072
3073 vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
3074 vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
Ishank Jain949674c2017-02-27 17:09:29 +05303075 vdev->dscp_tid_map_id = 0;
Ishank Jainc838b132017-02-17 11:08:18 +05303076 vdev->mcast_enhancement_en = 0;
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07003077 tx_ring_size = wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003078
3079 /* TODO: Initialize default HTT meta data that will be used in
3080 * TCL descriptors for packets transmitted from this VDEV
3081 */
3082
3083 TAILQ_INIT(&vdev->peer_list);
3084
3085 /* add this vdev into the pdev's list */
3086 TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
3087 pdev->vdev_count++;
3088
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303089 dp_tx_vdev_attach(vdev);
3090
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07003091 if (QDF_STATUS_SUCCESS != dp_tx_flow_pool_map_handler(pdev, vdev_id,
3092 FLOW_TYPE_VDEV, vdev_id, tx_ring_size))
3093 goto fail1;
3094
3095
psimhac983d7e2017-07-26 15:20:07 -07003096 if ((soc->intr_mode == DP_INTR_POLL) &&
3097 wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05303098 if (pdev->vdev_count == 1)
3099 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3100 }
Vijay Pamidipati88c40ff2016-11-17 21:27:02 +05303101
Dhanashri Atreb178eb42017-03-21 12:32:33 -07003102 dp_lro_hash_setup(soc);
3103
Dhanashri Atre0da31222017-03-23 12:30:58 -07003104 /* LRO */
3105 if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3106 wlan_op_mode_sta == vdev->opmode)
3107 vdev->lro_enable = true;
3108
3109 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3110 "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
3111
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303112 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003113 "Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
Ishank Jain1e7401c2017-02-17 15:38:39 +05303114 DP_STATS_INIT(vdev);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003115
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05303116 if (wlan_op_mode_sta == vdev->opmode)
3117 dp_peer_create_wifi3((struct cdp_vdev *)vdev,
3118 vdev->mac_addr.raw);
3119
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003120 return (struct cdp_vdev *)vdev;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003121
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07003122fail1:
3123 dp_tx_vdev_detach(vdev);
3124 qdf_mem_free(vdev);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003125fail0:
3126 return NULL;
3127}
3128
/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @vdev_handle: Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Stores the OS-interface callbacks on the vdev and publishes the DP Tx
 * entry points back to the caller through @txrx_ops.
 *
 * Return: void
 */
static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
	void *osif_vdev,
	struct ol_txrx_ops *txrx_ops)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	vdev->osif_vdev = osif_vdev;
	vdev->osif_rx = txrx_ops->rx.rx;
	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
	vdev->osif_get_key = txrx_ops->get_key;
	vdev->osif_rx_mon = txrx_ops->rx.mon;
	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
#ifdef notyet
#if ATH_SUPPORT_WAPI
	vdev->osif_check_wai = txrx_ops->rx.wai_check;
#endif
#endif
#ifdef UMAC_SUPPORT_PROXY_ARP
	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
#endif
	vdev->me_convert = txrx_ops->me_convert;

	/* TODO: Enable the following once Tx code is integrated */
	/* mesh vdevs take the mesh-aware Tx entry point */
	if (vdev->mesh_vdev)
		txrx_ops->tx.tx = dp_tx_send_mesh;
	else
		txrx_ops->tx.tx = dp_tx_send;

	txrx_ops->tx.tx_exception = dp_tx_send_exception;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		"DP Vdev Register success");
}
3169
3170/*
3171 * dp_vdev_detach_wifi3() - Detach txrx vdev
3172 * @txrx_vdev: Datapath VDEV handle
3173 * @callback: Callback OL_IF on completion of detach
3174 * @cb_context: Callback context
3175 *
3176 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003177static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003178 ol_txrx_vdev_delete_cb callback, void *cb_context)
3179{
3180 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3181 struct dp_pdev *pdev = vdev->pdev;
3182 struct dp_soc *soc = pdev->soc;
3183
3184 /* preconditions */
3185 qdf_assert(vdev);
3186
3187 /* remove the vdev from its parent pdev's list */
3188 TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
3189
3190 /*
3191 * Use peer_ref_mutex while accessing peer_list, in case
3192 * a peer is in the process of being removed from the list.
3193 */
3194 qdf_spin_lock_bh(&soc->peer_ref_mutex);
3195 /* check that the vdev has no peers allocated */
3196 if (!TAILQ_EMPTY(&vdev->peer_list)) {
3197 /* debug print - will be removed later */
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303198 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003199 FL("not deleting vdev object %pK (%pM)"
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303200 "until deletion finishes for all its peers"),
3201 vdev, vdev->mac_addr.raw);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003202 /* indicate that the vdev needs to be deleted */
3203 vdev->delete.pending = 1;
3204 vdev->delete.callback = callback;
3205 vdev->delete.context = cb_context;
3206 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3207 return;
3208 }
3209 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3210
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07003211 dp_tx_flow_pool_unmap_handler(pdev, vdev->vdev_id, FLOW_TYPE_VDEV,
3212 vdev->vdev_id);
Pamidipati, Vijay576bd152016-09-27 20:58:18 +05303213 dp_tx_vdev_detach(vdev);
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303214 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003215 FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003216
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05303217 if (wlan_op_mode_sta == vdev->opmode)
3218 dp_peer_delete_wifi3(vdev->vap_bss_peer, 0);
3219
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003220 qdf_mem_free(vdev);
3221
3222 if (callback)
3223 callback(cb_context);
3224}
3225
3226/*
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08003227 * dp_peer_create_wifi3() - attach txrx peer
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003228 * @txrx_vdev: Datapath VDEV handle
3229 * @peer_mac_addr: Peer MAC address
3230 *
3231 * Return: DP peeer handle on success, NULL on failure
3232 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003233static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
3234 uint8_t *peer_mac_addr)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003235{
3236 struct dp_peer *peer;
3237 int i;
3238 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3239 struct dp_pdev *pdev;
3240 struct dp_soc *soc;
3241
3242 /* preconditions */
3243 qdf_assert(vdev);
3244 qdf_assert(peer_mac_addr);
3245
3246 pdev = vdev->pdev;
3247 soc = pdev->soc;
3248#ifdef notyet
3249 peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
3250 soc->mempool_ol_ath_peer);
3251#else
3252 peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
3253#endif
3254
3255 if (!peer)
3256 return NULL; /* failure */
3257
Tallapragada57d86602017-03-31 07:53:58 +05303258 qdf_mem_zero(peer, sizeof(struct dp_peer));
3259
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05303260 TAILQ_INIT(&peer->ast_entry_list);
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05303261
Tallapragada Kalyanfb72b632017-07-07 12:51:58 +05303262 /* store provided params */
3263 peer->vdev = vdev;
3264
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05303265 dp_peer_add_ast(soc, peer, peer_mac_addr, CDP_TXRX_AST_TYPE_STATIC, 0);
Tallapragada Kalyan6f6166e2017-02-17 17:00:23 +05303266
Leo Chang5ea93a42016-11-03 12:39:49 -07003267 qdf_spinlock_create(&peer->peer_info_lock);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003268
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003269 qdf_mem_copy(
3270 &peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
3271
3272 /* TODO: See of rx_opt_proc is really required */
3273 peer->rx_opt_proc = soc->rx_opt_proc;
3274
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003275 /* initialize the peer_id */
3276 for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
3277 peer->peer_ids[i] = HTT_INVALID_PEER;
3278
3279 qdf_spin_lock_bh(&soc->peer_ref_mutex);
3280
3281 qdf_atomic_init(&peer->ref_cnt);
3282
3283 /* keep one reference for attach */
3284 qdf_atomic_inc(&peer->ref_cnt);
3285
3286 /* add this peer into the vdev's list */
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05303287 if (wlan_op_mode_sta == vdev->opmode)
3288 TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
3289 else
3290 TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
3291
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003292 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3293
3294 /* TODO: See if hash based search is required */
3295 dp_peer_find_hash_add(soc, peer);
3296
Varun Reddy Yeturub9ec57e2017-11-28 11:42:09 -08003297 /* Initialize the peer state */
3298 peer->state = OL_TXRX_PEER_STATE_DISC;
3299
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303300 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003301 "vdev %pK created peer %pK (%pM) ref_cnt: %d",
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08003302 vdev, peer, peer->mac_addr.raw,
3303 qdf_atomic_read(&peer->ref_cnt));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003304 /*
3305 * For every peer MAp message search and set if bss_peer
3306 */
3307 if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303308 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3309 "vdev bss_peer!!!!");
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003310 peer->bss_peer = 1;
3311 vdev->vap_bss_peer = peer;
3312 }
3313
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05303314
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05303315#ifndef CONFIG_WIN
Leo Chang5ea93a42016-11-03 12:39:49 -07003316 dp_local_peer_id_alloc(pdev, peer);
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05303317#endif
Ishank Jain1e7401c2017-02-17 15:38:39 +05303318 DP_STATS_INIT(peer);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003319 return (void *)peer;
3320}
3321
3322/*
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08003323 * dp_peer_setup_wifi3() - initialize the peer
3324 * @vdev_hdl: virtual device object
3325 * @peer: Peer object
3326 *
3327 * Return: void
3328 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08003329static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08003330{
3331 struct dp_peer *peer = (struct dp_peer *)peer_hdl;
3332 struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
3333 struct dp_pdev *pdev;
3334 struct dp_soc *soc;
Dhanashri Atre14049172016-11-11 18:32:36 -08003335 bool hash_based = 0;
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05303336 enum cdp_host_reo_dest_ring reo_dest;
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08003337
3338 /* preconditions */
3339 qdf_assert(vdev);
3340 qdf_assert(peer);
3341
3342 pdev = vdev->pdev;
3343 soc = pdev->soc;
3344
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08003345 peer->last_assoc_rcvd = 0;
3346 peer->last_disassoc_rcvd = 0;
3347 peer->last_deauth_rcvd = 0;
3348
Tallapragada Kalyandbbb0c82017-08-24 20:58:04 +05303349 /*
3350 * hash based steering is disabled for Radios which are offloaded
3351 * to NSS
3352 */
3353 if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
3354 hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
3355
3356 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3357 FL("hash based steering for pdev: %d is %d\n"),
3358 pdev->pdev_id, hash_based);
Dhanashri Atre14049172016-11-11 18:32:36 -08003359
Tallapragada Kalyan61cb97c2017-09-20 12:42:10 +05303360 /*
3361 * Below line of code will ensure the proper reo_dest ring is choosen
3362 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
3363 */
3364 reo_dest = pdev->reo_dest;
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05303365
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08003366 if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
3367 /* TODO: Check the destination ring number to be passed to FW */
Dhanashri Atre14049172016-11-11 18:32:36 -08003368 soc->cdp_soc.ol_ops->peer_set_default_routing(
3369 pdev->osif_pdev, peer->mac_addr.raw,
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05303370 peer->vdev->vdev_id, hash_based, reo_dest);
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08003371 }
Ruchi, Agrawal8e2796b2018-02-07 19:07:43 +05303372
3373 dp_peer_rx_init(pdev, peer);
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08003374 return;
3375}
3376
3377/*
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05303378 * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
3379 * @vdev_handle: virtual device object
3380 * @htt_pkt_type: type of pkt
3381 *
3382 * Return: void
3383 */
3384static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
3385 enum htt_cmn_pkt_type val)
3386{
3387 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3388 vdev->tx_encap_type = val;
3389}
3390
3391/*
3392 * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
3393 * @vdev_handle: virtual device object
3394 * @htt_pkt_type: type of pkt
3395 *
3396 * Return: void
3397 */
3398static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
3399 enum htt_cmn_pkt_type val)
3400{
3401 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
3402 vdev->rx_decap_type = val;
3403}
3404
3405/*
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05303406 * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
3407 * @pdev_handle: physical device object
3408 * @val: reo destination ring index (1 - 4)
3409 *
3410 * Return: void
3411 */
3412static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
3413 enum cdp_host_reo_dest_ring val)
3414{
3415 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3416
3417 if (pdev)
3418 pdev->reo_dest = val;
3419}
3420
3421/*
3422 * dp_get_pdev_reo_dest() - get the reo destination for this pdev
3423 * @pdev_handle: physical device object
3424 *
3425 * Return: reo destination ring index
3426 */
3427static enum cdp_host_reo_dest_ring
3428dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
3429{
3430 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3431
3432 if (pdev)
3433 return pdev->reo_dest;
3434 else
3435 return cdp_host_reo_dest_ring_unknown;
3436}
3437
#ifdef QCA_SUPPORT_SON
/*
 * dp_son_peer_authorize() - reset SON inactivity tracking on authorize
 * @peer: Datapath peer handle
 *
 * Clears the peer's inactive flag and reloads its inactivity budget from
 * the soc-level reload value.
 *
 * Return: void
 */
static void dp_son_peer_authorize(struct dp_peer *peer)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;

	peer->peer_bs_inact_flag = 0;
	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
}
#else
/* SON support compiled out: nothing to track */
static void dp_son_peer_authorize(struct dp_peer *peer)
{
}
#endif
/*
 * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
 * @pdev_handle: device object
 * @val: value to be set (non-zero enables NAC filtering)
 *
 * Return: 0 always
 */
static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
	uint32_t val)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	/* Enable/Disable smart mesh filtering. This flag will be checked
	 * during rx processing to check if packets are from NAC clients.
	 */
	pdev->filter_neighbour_peers = val;
	return 0;
}
3471
/*
 * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
 * address for smart mesh filtering
 * @pdev_handle: device object
 * @cmd: Add/Del command (DP_NAC_PARAM_ADD / DP_NAC_PARAM_DEL)
 * @macaddr: nac client mac address
 *
 * Return: 1 when an add/delete was processed, 0 on failure
 *         (NULL mac, allocation failure, or unknown command)
 */
static int dp_update_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
	uint32_t cmd, uint8_t *macaddr)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_neighbour_peer *peer = NULL;

	if (!macaddr)
		goto fail0;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
			sizeof(*peer));

		if (!peer) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("DP neighbour peer node memory allocation failed"));
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			macaddr, DP_MAC_ADDR_LEN);


		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
				neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		return 1;

	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		/* only the first entry matching the MAC is removed */
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				macaddr, DP_MAC_ADDR_LEN)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&pdev->neighbour_peers_list,
					peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		return 1;

	}

fail0:
	return 0;
}
3537
3538/*
Chaitanya Kiran Godavarthi6228e3b2017-06-15 14:28:19 +05303539 * dp_get_sec_type() - Get the security type
3540 * @peer: Datapath peer handle
3541 * @sec_idx: Security id (mcast, ucast)
3542 *
3543 * return sec_type: Security type
3544 */
3545static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
3546{
3547 struct dp_peer *dpeer = (struct dp_peer *)peer;
3548
3549 return dpeer->security[sec_idx].sec_type;
3550}
3551
3552/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003553 * dp_peer_authorize() - authorize txrx peer
3554 * @peer_handle: Datapath peer handle
3555 * @authorize
3556 *
3557 */
c_cgodavbd5b3c22017-06-07 12:31:40 +05303558static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003559{
3560 struct dp_peer *peer = (struct dp_peer *)peer_handle;
3561 struct dp_soc *soc;
3562
3563 if (peer != NULL) {
3564 soc = peer->vdev->pdev->soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003565 qdf_spin_lock_bh(&soc->peer_ref_mutex);
Bharat Bhushan Chakravarty145d3932017-03-20 12:52:16 -07003566 dp_son_peer_authorize(peer);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003567 peer->authorize = authorize ? 1 : 0;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003568 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3569 }
3570}
3571
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05303572#ifdef QCA_SUPPORT_SON
/*
 * dp_txrx_update_inact_threshold() - Update inact timer threshold
 * @pdev_handle: Device handle
 * @new_threshold : updated threshold value (inactivity budget, in check
 *                  periods)
 *
 * Updates the soc-level reload value and rescales the remaining
 * inactivity budget of every authorized peer on every AP vdev so that
 * the time already spent inactive is preserved under the new threshold.
 * Peers whose elapsed inactivity (old_threshold - peer_bs_inact) already
 * meets the new threshold are marked inactive immediately.
 *
 * Return: void
 */
static void
dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle,
		u_int16_t new_threshold)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_soc *soc = pdev->soc;
	u_int16_t old_threshold = soc->pdev_bs_inact_reload;

	/* nothing to rescale if the threshold is unchanged */
	if (old_threshold == new_threshold)
		return;

	soc->pdev_bs_inact_reload = new_threshold;

	/* peer lists are walked under the peer reference lock */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->opmode != wlan_op_mode_ap)
			continue;

		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if (!peer->authorize)
				continue;

			/* elapsed = old_threshold - peer_bs_inact */
			if (old_threshold - peer->peer_bs_inact >=
					new_threshold) {
				/* already past the new budget */
				dp_mark_peer_inact((void *)peer, true);
				peer->peer_bs_inact = 0;
			} else {
				/* keep elapsed time, rebase the budget */
				peer->peer_bs_inact = new_threshold -
					(old_threshold - peer->peer_bs_inact);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}
3615
3616/**
3617 * dp_txrx_reset_inact_count(): Reset inact count
3618 * @pdev_handle - device handle
3619 *
3620 * Return: void
3621 */
3622static void
3623dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle)
3624{
3625 struct dp_vdev *vdev = NULL;
3626 struct dp_peer *peer = NULL;
3627 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3628 struct dp_soc *soc = pdev->soc;
3629
3630 qdf_spin_lock_bh(&soc->peer_ref_mutex);
3631 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3632 if (vdev->opmode != wlan_op_mode_ap)
3633 continue;
3634
3635 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3636 if (!peer->authorize)
3637 continue;
3638
3639 peer->peer_bs_inact = soc->pdev_bs_inact_reload;
3640 }
3641 }
3642 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3643}
3644
3645/**
3646 * dp_set_inact_params(): set inactivity params
3647 * @pdev_handle - device handle
3648 * @inact_check_interval - inactivity interval
3649 * @inact_normal - Inactivity normal
3650 * @inact_overload - Inactivity overload
3651 *
3652 * Return: bool
3653 */
3654bool dp_set_inact_params(struct cdp_pdev *pdev_handle,
3655 u_int16_t inact_check_interval,
3656 u_int16_t inact_normal, u_int16_t inact_overload)
3657{
3658 struct dp_soc *soc;
3659 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3660
3661 if (!pdev)
3662 return false;
3663
3664 soc = pdev->soc;
3665 if (!soc)
3666 return false;
3667
3668 soc->pdev_bs_inact_interval = inact_check_interval;
3669 soc->pdev_bs_inact_normal = inact_normal;
3670 soc->pdev_bs_inact_overload = inact_overload;
3671
3672 dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
3673 soc->pdev_bs_inact_normal);
3674
3675 return true;
3676}
3677
3678/**
3679 * dp_start_inact_timer(): Inactivity timer start
3680 * @pdev_handle - device handle
3681 * @enable - Inactivity timer start/stop
3682 *
3683 * Return: bool
3684 */
3685bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable)
3686{
3687 struct dp_soc *soc;
3688 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3689
3690 if (!pdev)
3691 return false;
3692
3693 soc = pdev->soc;
3694 if (!soc)
3695 return false;
3696
3697 if (enable) {
3698 dp_txrx_reset_inact_count((struct cdp_pdev *)pdev);
3699 qdf_timer_mod(&soc->pdev_bs_inact_timer,
3700 soc->pdev_bs_inact_interval * 1000);
3701 } else {
3702 qdf_timer_stop(&soc->pdev_bs_inact_timer);
3703 }
3704
3705 return true;
3706}
3707
3708/**
3709 * dp_set_overload(): Set inactivity overload
3710 * @pdev_handle - device handle
3711 * @overload - overload status
3712 *
3713 * Return: void
3714 */
3715void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload)
3716{
3717 struct dp_soc *soc;
3718 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
3719
3720 if (!pdev)
3721 return;
3722
3723 soc = pdev->soc;
3724 if (!soc)
3725 return;
3726
3727 dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev,
3728 overload ? soc->pdev_bs_inact_overload :
3729 soc->pdev_bs_inact_normal);
3730}
3731
3732/**
3733 * dp_peer_is_inact(): check whether peer is inactive
3734 * @peer_handle - datapath peer handle
3735 *
3736 * Return: bool
3737 */
3738bool dp_peer_is_inact(void *peer_handle)
3739{
3740 struct dp_peer *peer = (struct dp_peer *)peer_handle;
3741
3742 if (!peer)
3743 return false;
3744
3745 return peer->peer_bs_inact_flag == 1;
3746}
3747
/**
 * dp_init_inact_timer: initialize the inact timer
 * @soc - SOC handle
 *
 * Initializes the SON inactivity timer with
 * dp_txrx_peer_find_inact_timeout_handler as its callback (receiving the
 * soc as argument) in WAKE_APPS mode. The timer is armed separately by
 * dp_start_inact_timer().
 *
 * Return: void
 */
void dp_init_inact_timer(struct dp_soc *soc)
{
	qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer,
		dp_txrx_peer_find_inact_timeout_handler,
		(void *)soc, QDF_TIMER_TYPE_WAKE_APPS);
}
3760
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05303761#else
3762
3763bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval,
3764 u_int16_t inact_normal, u_int16_t inact_overload)
3765{
3766 return false;
3767}
3768
3769bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable)
3770{
3771 return false;
3772}
3773
3774void dp_set_overload(struct cdp_pdev *pdev, bool overload)
3775{
3776 return;
3777}
3778
Stephan Raj Ignatious Durairaje5dd51c2018-02-09 16:39:12 +05303779void dp_init_inact_timer(struct dp_soc *soc)
3780{
3781 return;
3782}
3783
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05303784bool dp_peer_is_inact(void *peer)
3785{
3786 return false;
3787}
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05303788#endif
3789
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003790/*
3791 * dp_peer_unref_delete() - unref and delete peer
3792 * @peer_handle: Datapath peer handle
3793 *
3794 */
3795void dp_peer_unref_delete(void *peer_handle)
3796{
3797 struct dp_peer *peer = (struct dp_peer *)peer_handle;
Tallapragada Kalyan4f894922018-01-03 14:26:28 +05303798 struct dp_peer *bss_peer = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003799 struct dp_vdev *vdev = peer->vdev;
Kiran Venkatappa9edb9612017-03-16 11:37:35 +05303800 struct dp_pdev *pdev = vdev->pdev;
3801 struct dp_soc *soc = pdev->soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003802 struct dp_peer *tmppeer;
3803 int found = 0;
3804 uint16_t peer_id;
3805
3806 /*
3807 * Hold the lock all the way from checking if the peer ref count
3808 * is zero until the peer references are removed from the hash
3809 * table and vdev list (if the peer ref count is zero).
3810 * This protects against a new HL tx operation starting to use the
3811 * peer object just after this function concludes it's done being used.
3812 * Furthermore, the lock needs to be held while checking whether the
3813 * vdev's list of peers is empty, to make sure that list is not modified
3814 * concurrently with the empty check.
3815 */
3816 qdf_spin_lock_bh(&soc->peer_ref_mutex);
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08003817 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003818 "%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
Krishna Kumaar Natarajan71e5b832017-01-26 08:04:13 -08003819 peer, qdf_atomic_read(&peer->ref_cnt));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003820 if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
3821 peer_id = peer->peer_ids[0];
3822
3823 /*
3824 * Make sure that the reference to the peer in
3825 * peer object map is removed
3826 */
3827 if (peer_id != HTT_INVALID_PEER)
3828 soc->peer_id_to_obj_map[peer_id] = NULL;
3829
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303830 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003831 "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003832
3833 /* remove the reference to the peer from the hash table */
3834 dp_peer_find_hash_remove(soc, peer);
3835
3836 TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
3837 if (tmppeer == peer) {
3838 found = 1;
3839 break;
3840 }
3841 }
3842 if (found) {
3843 TAILQ_REMOVE(&peer->vdev->peer_list, peer,
3844 peer_list_elem);
3845 } else {
3846 /*Ignoring the remove operation as peer not found*/
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303847 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003848 "peer %pK not found in vdev (%pK)->peer_list:%pK",
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003849 peer, vdev, &peer->vdev->peer_list);
3850 }
3851
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08003852 /* cleanup the peer data */
3853 dp_peer_cleanup(vdev, peer);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003854
3855 /* check whether the parent vdev has no peers left */
3856 if (TAILQ_EMPTY(&vdev->peer_list)) {
3857 /*
3858 * Now that there are no references to the peer, we can
3859 * release the peer reference lock.
3860 */
3861 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3862 /*
3863 * Check if the parent vdev was waiting for its peers
3864 * to be deleted, in order for it to be deleted too.
3865 */
3866 if (vdev->delete.pending) {
3867 ol_txrx_vdev_delete_cb vdev_delete_cb =
3868 vdev->delete.callback;
3869 void *vdev_delete_context =
3870 vdev->delete.context;
3871
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303872 QDF_TRACE(QDF_MODULE_ID_DP,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003873 QDF_TRACE_LEVEL_INFO_HIGH,
Jeff Johnson3f217e22017-09-18 10:13:35 -07003874 FL("deleting vdev object %pK (%pM)"
Tallapragada Kalyan1ef54802016-11-30 12:54:55 +05303875 " - its last peer is done"),
3876 vdev, vdev->mac_addr.raw);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003877 /* all peers are gone, go ahead and delete it */
chenguo7853b792017-12-28 20:59:12 +08003878 dp_tx_flow_pool_unmap_handler(pdev, vdev->vdev_id,
3879 FLOW_TYPE_VDEV,
3880 vdev->vdev_id);
3881 dp_tx_vdev_detach(vdev);
3882 QDF_TRACE(QDF_MODULE_ID_DP,
3883 QDF_TRACE_LEVEL_INFO_HIGH,
3884 FL("deleting vdev object %pK (%pM)"),
3885 vdev, vdev->mac_addr.raw);
3886
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003887 qdf_mem_free(vdev);
3888 if (vdev_delete_cb)
3889 vdev_delete_cb(vdev_delete_context);
3890 }
3891 } else {
3892 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3893 }
chenguo1dead6f2018-01-08 14:51:44 +08003894
3895 if (soc->cdp_soc.ol_ops->peer_unref_delete) {
3896 soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
3897 vdev->vdev_id, peer->mac_addr.raw);
3898 }
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003899#ifdef notyet
3900 qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
3901#else
Tallapragada Kalyan53f9e392018-01-09 14:27:23 +05303902 if (!vdev || !vdev->vap_bss_peer)
Tallapragada Kalyan4f894922018-01-03 14:26:28 +05303903 goto free_peer;
3904
3905 bss_peer = vdev->vap_bss_peer;
3906 DP_UPDATE_STATS(bss_peer, peer);
3907
3908free_peer:
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003909 qdf_mem_free(peer);
3910#endif
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003911 } else {
3912 qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3913 }
3914}
3915
/*
 * dp_peer_delete_wifi3() - Delete txrx peer
 * @peer_handle: Datapath peer handle
 * @bitmap: bitmap indicating special handling of request
 *	    (not referenced in this implementation)
 *
 */
static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	/* redirect the peer's rx delivery function to point to a
	 * discard func
	 */
	peer->rx_opt_proc = dp_rx_discard;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		FL("peer %pK (%pM)"), peer, peer->mac_addr.raw);

#ifndef CONFIG_WIN
	dp_local_peer_id_free(peer->vdev->pdev, peer);
#endif
	qdf_spinlock_destroy(&peer->peer_info_lock);

	/*
	 * Remove the reference added during peer_attach.
	 * The peer will still be left allocated until the
	 * PEER_UNMAP message arrives to remove the other
	 * reference, added by the PEER_MAP message.
	 */
	dp_peer_unref_delete(peer_handle);
}
3947
/*
 * dp_get_vdev_mac_addr_wifi3() - Get vdev MAC address
 * @pvdev: Datapath vdev handle
 *
 * Return: pointer to the vdev's raw MAC address bytes
 */
static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
{
	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
	return vdev->mac_addr.raw;
}
3958
/*
 * dp_vdev_set_wds() - Enable/disable WDS on the vdev
 * @vdev_handle: DP VDEV handle
 * @val: value (0 disables WDS, non-zero enables it)
 *
 * Return: 0 (always succeeds)
 */
static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->wds_enabled = val;
	return 0;
}
3973
/*
 * dp_get_vdev_from_vdev_id_wifi3() - Find vdev on a pdev by vdev_id
 * @dev: Datapath pdev handle
 * @vdev_id: id of the vdev to look up
 *
 * Return: matching vdev handle; NULL when the pdev is invalid or no
 *	   vdev with the given id exists (loop leaves the iterator NULL)
 */
static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
		uint8_t vdev_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(!pdev))
		return NULL;

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->vdev_id == vdev_id)
			break;
	}

	return (struct cdp_vdev *)vdev;
}
3995
/*
 * dp_get_opmode() - Get the vdev operating mode
 * @vdev_handle: Datapath vdev handle
 *
 * Return: the vdev's opmode value
 */
static int dp_get_opmode(struct cdp_vdev *vdev_handle)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	return vdev->opmode;
}
4002
/*
 * dp_get_ctrl_pdev_from_vdev_wifi3() - Get pdev config handle for a vdev
 * @pvdev: Datapath vdev handle
 *
 * Return: the wlan_cfg context of the vdev's parent pdev
 */
static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
{
	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
	struct dp_pdev *pdev = vdev->pdev;

	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
}
/**
 * dp_reset_monitor_mode() - Disable monitor mode
 * @pdev_handle: Datapath PDEV handle
 *
 * Detaches the pdev's monitor vdev reference and reprograms both the
 * monitor buffer and monitor status rings with an all-zero TLV filter
 * (nothing subscribed), which stops monitor-mode delivery.
 *
 * Return: 0 on success, not 0 on failure
 */
static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct htt_rx_ring_tlv_filter htt_tlv_filter;
	struct dp_soc *soc;
	uint8_t pdev_id;

	pdev_id = pdev->pdev_id;
	soc = pdev->soc;

	pdev->monitor_vdev = NULL;
	/* zeroed filter: no TLVs/packets are requested on either ring */
	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
		pdev->rxdma_mon_buf_ring.hal_srng,
		RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);

	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
		RX_BUFFER_SIZE, &htt_tlv_filter);

	return 0;
}
4039/**
Kai Chen6eca1a62017-01-12 10:17:53 -08004040 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
4041 * @vdev_handle: Datapath VDEV handle
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304042 * @smart_monitor: Flag to denote if its smart monitor mode
Kai Chen6eca1a62017-01-12 10:17:53 -08004043 *
4044 * Return: 0 on success, not 0 on failure
4045 */
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304046static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
4047 uint8_t smart_monitor)
Kai Chen6eca1a62017-01-12 10:17:53 -08004048{
4049 /* Many monitor VAPs can exists in a system but only one can be up at
4050 * anytime
4051 */
4052 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4053 struct dp_pdev *pdev;
4054 struct htt_rx_ring_tlv_filter htt_tlv_filter;
4055 struct dp_soc *soc;
4056 uint8_t pdev_id;
4057
4058 qdf_assert(vdev);
4059
4060 pdev = vdev->pdev;
4061 pdev_id = pdev->pdev_id;
4062 soc = pdev->soc;
4063
4064 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
Jeff Johnson3f217e22017-09-18 10:13:35 -07004065 "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
Kai Chen6eca1a62017-01-12 10:17:53 -08004066 pdev, pdev_id, soc, vdev);
4067
4068 /*Check if current pdev's monitor_vdev exists */
4069 if (pdev->monitor_vdev) {
4070 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07004071 "vdev=%pK\n", vdev);
Kai Chen6eca1a62017-01-12 10:17:53 -08004072 qdf_assert(vdev);
4073 }
4074
4075 pdev->monitor_vdev = vdev;
4076
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05304077 /* If smart monitor mode, do not configure monitor ring */
4078 if (smart_monitor)
4079 return QDF_STATUS_SUCCESS;
4080
nobeljd124b742017-10-16 11:59:12 -07004081 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
4082 "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
4083 pdev->mon_filter_mode, pdev->fp_mgmt_filter,
4084 pdev->fp_ctrl_filter, pdev->fp_data_filter,
4085 pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
4086 pdev->mo_data_filter);
4087
Kai Chen6eca1a62017-01-12 10:17:53 -08004088 htt_tlv_filter.mpdu_start = 1;
4089 htt_tlv_filter.msdu_start = 1;
4090 htt_tlv_filter.packet = 1;
4091 htt_tlv_filter.msdu_end = 1;
4092 htt_tlv_filter.mpdu_end = 1;
4093 htt_tlv_filter.packet_header = 1;
4094 htt_tlv_filter.attention = 1;
4095 htt_tlv_filter.ppdu_start = 0;
4096 htt_tlv_filter.ppdu_end = 0;
4097 htt_tlv_filter.ppdu_end_user_stats = 0;
4098 htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4099 htt_tlv_filter.ppdu_end_status_done = 0;
sumedh baikady308ff002017-09-18 16:24:36 -07004100 htt_tlv_filter.header_per_msdu = 1;
nobeljd124b742017-10-16 11:59:12 -07004101 htt_tlv_filter.enable_fp =
4102 (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
Kai Chen6eca1a62017-01-12 10:17:53 -08004103 htt_tlv_filter.enable_md = 0;
nobeljd124b742017-10-16 11:59:12 -07004104 htt_tlv_filter.enable_mo =
4105 (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4106 htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4107 htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4108 htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4109 htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4110 htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4111 htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
Kai Chen6eca1a62017-01-12 10:17:53 -08004112
4113 htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
Karunakar Dasineni40555682017-03-26 22:44:39 -07004114 pdev->rxdma_mon_buf_ring.hal_srng,
4115 RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
Kai Chen6eca1a62017-01-12 10:17:53 -08004116
4117 htt_tlv_filter.mpdu_start = 1;
4118 htt_tlv_filter.msdu_start = 1;
4119 htt_tlv_filter.packet = 0;
4120 htt_tlv_filter.msdu_end = 1;
4121 htt_tlv_filter.mpdu_end = 1;
4122 htt_tlv_filter.packet_header = 1;
4123 htt_tlv_filter.attention = 1;
4124 htt_tlv_filter.ppdu_start = 1;
4125 htt_tlv_filter.ppdu_end = 1;
4126 htt_tlv_filter.ppdu_end_user_stats = 1;
4127 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
4128 htt_tlv_filter.ppdu_end_status_done = 1;
sumedh baikady308ff002017-09-18 16:24:36 -07004129 htt_tlv_filter.header_per_msdu = 0;
nobeljd124b742017-10-16 11:59:12 -07004130 htt_tlv_filter.enable_fp =
4131 (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
Karunakar Dasineni40555682017-03-26 22:44:39 -07004132 htt_tlv_filter.enable_md = 0;
nobeljd124b742017-10-16 11:59:12 -07004133 htt_tlv_filter.enable_mo =
4134 (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
4135 htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
4136 htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
4137 htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
4138 htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
4139 htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
4140 htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
4141
4142 htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
4143 pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
4144 RX_BUFFER_SIZE, &htt_tlv_filter);
4145
4146 return QDF_STATUS_SUCCESS;
4147}
4148
/**
 * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
 * @pdev_handle: Datapath PDEV handle
 * @filter_val: Flag to select Filter for monitor mode
 *
 * Caches the requested filter values on the pdev, then reprograms both
 * monitor rings: first with a zeroed filter (disable), then with the new
 * filter configuration for the buffer ring (full packet) and the status
 * ring (PPDU status TLVs).
 *
 * Return: 0 on success, not 0 on failure
 */
static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
	struct cdp_monitor_filter *filter_val)
{
	/* Many monitor VAPs can exists in a system but only one can be up at
	 * anytime
	 */
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = pdev->monitor_vdev;
	struct htt_rx_ring_tlv_filter htt_tlv_filter;
	struct dp_soc *soc;
	uint8_t pdev_id;

	pdev_id = pdev->pdev_id;
	soc = pdev->soc;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
		pdev, pdev_id, soc, vdev);

	/*Check if current pdev's monitor_vdev exists */
	if (!pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"vdev=%pK\n", vdev);
		qdf_assert(vdev);
	}

	/* update filter mode, type in pdev structure */
	pdev->mon_filter_mode = filter_val->mode;
	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
	pdev->fp_data_filter = filter_val->fp_data;
	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
	pdev->mo_data_filter = filter_val->mo_data;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
		pdev->fp_ctrl_filter, pdev->fp_data_filter,
		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
		pdev->mo_data_filter);

	/* disable both rings first with an all-zero filter */
	qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);

	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
		pdev->rxdma_mon_buf_ring.hal_srng,
		RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);

	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
		RX_BUFFER_SIZE, &htt_tlv_filter);

	/* monitor buffer ring: full packet with per-MSDU headers */
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.header_per_msdu = 1;
	htt_tlv_filter.enable_fp =
		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo =
		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;

	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
		pdev->rxdma_mon_buf_ring.hal_srng, RXDMA_MONITOR_BUF,
		RX_BUFFER_SIZE, &htt_tlv_filter);

	/* monitor status ring: PPDU status TLVs only, no packet payload */
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.packet = 0;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.ppdu_start = 1;
	htt_tlv_filter.ppdu_end = 1;
	htt_tlv_filter.ppdu_end_user_stats = 1;
	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
	htt_tlv_filter.ppdu_end_status_done = 1;
	htt_tlv_filter.header_per_msdu = 0;
	htt_tlv_filter.enable_fp =
		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo =
		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
	htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;

	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
		pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
		RX_BUFFER_SIZE, &htt_tlv_filter);

	return QDF_STATUS_SUCCESS;
}
Leo Chang5ea93a42016-11-03 12:39:49 -07004267
nobeljc8eb4d62018-01-04 14:29:32 -08004268/**
4269 * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
4270 * @vdev_handle: Datapath VDEV handle
4271 * Return: true on ucast filter flag set
4272 */
4273static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
4274{
4275 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4276 struct dp_pdev *pdev;
4277
4278 pdev = vdev->pdev;
4279
4280 if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
4281 (pdev->mo_data_filter & FILTER_DATA_UCAST))
4282 return true;
4283
4284 return false;
4285}
4286
4287/**
4288 * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
4289 * @vdev_handle: Datapath VDEV handle
4290 * Return: true on mcast filter flag set
4291 */
4292static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
4293{
4294 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4295 struct dp_pdev *pdev;
4296
4297 pdev = vdev->pdev;
4298
4299 if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
4300 (pdev->mo_data_filter & FILTER_DATA_MCAST))
4301 return true;
4302
4303 return false;
4304}
4305
4306/**
4307 * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
4308 * @vdev_handle: Datapath VDEV handle
4309 * Return: true on non data filter flag set
4310 */
4311static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
4312{
4313 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4314 struct dp_pdev *pdev;
4315
4316 pdev = vdev->pdev;
4317
4318 if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
4319 (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
4320 if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
4321 (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
4322 return true;
4323 }
4324 }
4325
4326 return false;
4327}
4328
#ifdef MESH_MODE_SUPPORT
/*
 * dp_peer_set_mesh_mode() - flag/unflag the vdev as a mesh vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */
void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		FL("val %d"), val);
	vdev->mesh_vdev = val;
}

/*
 * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */
void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		FL("val %d"), val);
	vdev->mesh_rx_filter = val;
}
#endif
4355
/*
 * dp_aggregate_pdev_ctrl_frames_stats()- function to aggregate peer stats
 * Current scope is bar received count
 *
 * @pdev_handle: DP_PDEV handle
 *
 * Return: void
 */
#define STATS_PROC_TIMEOUT (HZ/10)

static void
dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	uint32_t waitcnt;

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			/* NOTE(review): TAILQ_FOREACH never enters the loop
			 * body with a NULL iterator, so this check is
			 * unreachable defensive code.
			 */
			if (!peer) {
				QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("DP Invalid Peer refernce"));
				return;
			}
			waitcnt = 0;
			/* issue the per-peer REO stats query;
			 * dp_rx_bar_stats_cb sets stats_cmd_complete
			 * when the response arrives
			 */
			dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);
			/* poll for completion, at most 10 timeout periods */
			while (!(qdf_atomic_read(&(pdev->stats_cmd_complete)))
				&& waitcnt < 10) {
				schedule_timeout_interruptible(
						STATS_PROC_TIMEOUT);
				waitcnt++;
			}
			/* re-arm the completion flag for the next peer */
			qdf_atomic_set(&(pdev->stats_cmd_complete), 0);
		}
	}
}
4392
/**
 * dp_rx_bar_stats_cb(): BAR received stats callback
 * @soc: SOC handle
 * @cb_ctxt: Call back context (the dp_pdev that issued the query)
 * @reo_status: Reo status
 *
 * Accumulates the BAR-received count from the REO queue status into the
 * pdev stats and signals completion via pdev->stats_cmd_complete, which
 * dp_aggregate_pdev_ctrl_frames_stats() polls.
 *
 * return: void
 */
void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_TRACE_STATS(FATAL, "REO stats failure %d \n",
			queue_status->header.status);
		/* signal completion even on failure so the waiter does
		 * not spin for the full timeout budget
		 */
		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
		return;
	}

	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);

}
4418
/**
 * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
 * @vdev: DP VDEV handle
 *
 * Clears the vdev's tx/rx stats, re-accumulates them from every peer on
 * the vdev's peer list, then pushes the result to the control plane via
 * the update_dp_stats callback when one is registered.
 *
 * return: void
 */
void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
{
	struct dp_peer *peer = NULL;
	struct dp_soc *soc = vdev->pdev->soc;

	/* reset, then rebuild the aggregate from scratch */
	qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
	qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
		DP_UPDATE_STATS(vdev, peer);

	if (soc->cdp_soc.ol_ops->update_dp_stats)
		soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
			&vdev->stats, (uint16_t) vdev->vdev_id,
			UPDATE_VDEV_STATS);

}
4442
/**
 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Clears the pdev tx/rx/tx_i stats, re-aggregates every vdev (after
 * refreshing each vdev's own aggregate from its peers), derives the
 * total dropped-packet count, and pushes the result to the control
 * plane via the update_dp_stats callback when registered.
 *
 * return: void
 */
static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc = pdev->soc;

	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		/* refresh the vdev aggregate before folding it in */
		dp_aggregate_vdev_stats(vdev);
		DP_UPDATE_STATS(pdev, vdev);

		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);

		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host);
		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host);
		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
		DP_STATS_AGGR(pdev, vdev,
				tx_i.mcast_en.dropped_map_error);
		DP_STATS_AGGR(pdev, vdev,
				tx_i.mcast_en.dropped_self_mac);
		DP_STATS_AGGR(pdev, vdev,
				tx_i.mcast_en.dropped_send_fail);
		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified);
		DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw);
		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw);
		DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw);

		/* total drops = sum of the individual drop reasons */
		pdev->stats.tx_i.dropped.dropped_pkt.num =
			pdev->stats.tx_i.dropped.dma_error +
			pdev->stats.tx_i.dropped.ring_full +
			pdev->stats.tx_i.dropped.enqueue_fail +
			pdev->stats.tx_i.dropped.desc_na +
			pdev->stats.tx_i.dropped.res_full;

		/* NOTE(review): these are plain assignments inside the
		 * vdev loop, so the pdev value reflects only the LAST
		 * vdev iterated, not an aggregate — confirm this is the
		 * intended semantics.
		 */
		pdev->stats.tx.last_ack_rssi =
			vdev->stats.tx.last_ack_rssi;
		pdev->stats.tx_i.tso.num_seg =
			vdev->stats.tx_i.tso.num_seg;
	}
	if (soc->cdp_soc.ol_ops->update_dp_stats)
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
			&pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS);

}
4512
4513/**
4514 * dp_print_pdev_tx_stats(): Print Pdev level TX stats
4515 * @pdev: DP_PDEV Handle
4516 *
4517 * Return:void
4518 */
4519static inline void
4520dp_print_pdev_tx_stats(struct dp_pdev *pdev)
4521{
Soumya Bhat0d6245c2018-02-08 21:02:57 +05304522 uint8_t index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304523 DP_PRINT_STATS("PDEV Tx Stats:\n");
4524 DP_PRINT_STATS("Received From Stack:");
4525 DP_PRINT_STATS(" Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304526 pdev->stats.tx_i.rcvd.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304527 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304528 pdev->stats.tx_i.rcvd.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304529 DP_PRINT_STATS("Processed:");
4530 DP_PRINT_STATS(" Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304531 pdev->stats.tx_i.processed.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304532 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304533 pdev->stats.tx_i.processed.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304534 DP_PRINT_STATS("Completions:");
4535 DP_PRINT_STATS(" Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304536 pdev->stats.tx.comp_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304537 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304538 pdev->stats.tx.comp_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304539 DP_PRINT_STATS("Dropped:");
4540 DP_PRINT_STATS(" Total = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304541 pdev->stats.tx_i.dropped.dropped_pkt.num);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304542 DP_PRINT_STATS(" Dma_map_error = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304543 pdev->stats.tx_i.dropped.dma_error);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304544 DP_PRINT_STATS(" Ring Full = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304545 pdev->stats.tx_i.dropped.ring_full);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304546 DP_PRINT_STATS(" Descriptor Not available = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304547 pdev->stats.tx_i.dropped.desc_na);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304548 DP_PRINT_STATS(" HW enqueue failed= %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304549 pdev->stats.tx_i.dropped.enqueue_fail);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304550 DP_PRINT_STATS(" Resources Full = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304551 pdev->stats.tx_i.dropped.res_full);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304552 DP_PRINT_STATS(" FW removed = %d",
4553 pdev->stats.tx.dropped.fw_rem);
4554 DP_PRINT_STATS(" FW removed transmitted = %d",
4555 pdev->stats.tx.dropped.fw_rem_tx);
4556 DP_PRINT_STATS(" FW removed untransmitted = %d",
4557 pdev->stats.tx.dropped.fw_rem_notx);
4558 DP_PRINT_STATS(" Aged Out from msdu/mpdu queues = %d",
4559 pdev->stats.tx.dropped.age_out);
4560 DP_PRINT_STATS("Scatter Gather:");
4561 DP_PRINT_STATS(" Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304562 pdev->stats.tx_i.sg.sg_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304563 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304564 pdev->stats.tx_i.sg.sg_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304565 DP_PRINT_STATS(" Dropped By Host = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304566 pdev->stats.tx_i.sg.dropped_host);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304567 DP_PRINT_STATS(" Dropped By Target = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304568 pdev->stats.tx_i.sg.dropped_target);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304569 DP_PRINT_STATS("TSO:");
4570 DP_PRINT_STATS(" Number of Segments = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304571 pdev->stats.tx_i.tso.num_seg);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304572 DP_PRINT_STATS(" Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304573 pdev->stats.tx_i.tso.tso_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304574 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304575 pdev->stats.tx_i.tso.tso_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304576 DP_PRINT_STATS(" Dropped By Host = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304577 pdev->stats.tx_i.tso.dropped_host);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304578 DP_PRINT_STATS("Mcast Enhancement:");
4579 DP_PRINT_STATS(" Packets = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304580 pdev->stats.tx_i.mcast_en.mcast_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304581 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05304582 pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304583 DP_PRINT_STATS(" Dropped: Map Errors = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304584 pdev->stats.tx_i.mcast_en.dropped_map_error);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304585 DP_PRINT_STATS(" Dropped: Self Mac = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304586 pdev->stats.tx_i.mcast_en.dropped_self_mac);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304587 DP_PRINT_STATS(" Dropped: Send Fail = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304588 pdev->stats.tx_i.mcast_en.dropped_send_fail);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304589 DP_PRINT_STATS(" Unicast sent = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304590 pdev->stats.tx_i.mcast_en.ucast);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304591 DP_PRINT_STATS("Raw:");
4592 DP_PRINT_STATS(" Packets = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304593 pdev->stats.tx_i.raw.raw_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304594 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05304595 pdev->stats.tx_i.raw.raw_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304596 DP_PRINT_STATS(" DMA map error = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304597 pdev->stats.tx_i.raw.dma_map_error);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304598 DP_PRINT_STATS("Reinjected:");
4599 DP_PRINT_STATS(" Packets = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304600 pdev->stats.tx_i.reinject_pkts.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304601 DP_PRINT_STATS("Bytes = %llu\n",
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304602 pdev->stats.tx_i.reinject_pkts.bytes);
4603 DP_PRINT_STATS("Inspected:");
4604 DP_PRINT_STATS(" Packets = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304605 pdev->stats.tx_i.inspect_pkts.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304606 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05304607 pdev->stats.tx_i.inspect_pkts.bytes);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05304608 DP_PRINT_STATS("Nawds Multicast:");
4609 DP_PRINT_STATS(" Packets = %d",
4610 pdev->stats.tx_i.nawds_mcast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304611 DP_PRINT_STATS(" Bytes = %llu",
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05304612 pdev->stats.tx_i.nawds_mcast.bytes);
Ruchi, Agrawal34721392017-11-13 18:02:09 +05304613 DP_PRINT_STATS("CCE Classified:");
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05304614 DP_PRINT_STATS(" CCE Classified Packets: %u",
Ruchi, Agrawal34721392017-11-13 18:02:09 +05304615 pdev->stats.tx_i.cce_classified);
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05304616 DP_PRINT_STATS(" RAW CCE Classified Packets: %u",
Ruchi, Agrawal4c1468f2017-12-08 00:04:33 +05304617 pdev->stats.tx_i.cce_classified_raw);
Venkateswara Swamy Bandaru58c80852018-01-29 17:52:02 +05304618 DP_PRINT_STATS("Mesh stats:");
4619 DP_PRINT_STATS(" frames to firmware: %u",
4620 pdev->stats.tx_i.mesh.exception_fw);
4621 DP_PRINT_STATS(" completions from fw: %u",
4622 pdev->stats.tx_i.mesh.completion_fw);
Soumya Bhat0d6245c2018-02-08 21:02:57 +05304623 DP_PRINT_STATS("PPDU stats counter");
4624 for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) {
4625 DP_PRINT_STATS(" Tag[%d] = %llu", index,
4626 pdev->stats.ppdu_stats_counter[index]);
4627 }
Ishank Jain1e7401c2017-02-17 15:38:39 +05304628}
4629
4630/**
4631 * dp_print_pdev_rx_stats(): Print Pdev level RX stats
4632 * @pdev: DP_PDEV Handle
4633 *
4634 * Return: void
4635 */
4636static inline void
4637dp_print_pdev_rx_stats(struct dp_pdev *pdev)
4638{
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304639 DP_PRINT_STATS("PDEV Rx Stats:\n");
4640 DP_PRINT_STATS("Received From HW (Per Rx Ring):");
4641 DP_PRINT_STATS(" Packets = %d %d %d %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304642 pdev->stats.rx.rcvd_reo[0].num,
4643 pdev->stats.rx.rcvd_reo[1].num,
4644 pdev->stats.rx.rcvd_reo[2].num,
4645 pdev->stats.rx.rcvd_reo[3].num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304646 DP_PRINT_STATS(" Bytes = %llu %llu %llu %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05304647 pdev->stats.rx.rcvd_reo[0].bytes,
4648 pdev->stats.rx.rcvd_reo[1].bytes,
4649 pdev->stats.rx.rcvd_reo[2].bytes,
4650 pdev->stats.rx.rcvd_reo[3].bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304651 DP_PRINT_STATS("Replenished:");
4652 DP_PRINT_STATS(" Packets = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304653 pdev->stats.replenish.pkts.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304654 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05304655 pdev->stats.replenish.pkts.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304656 DP_PRINT_STATS(" Buffers Added To Freelist = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304657 pdev->stats.buf_freelist);
Karunakar Dasineni87f0c5d2017-10-29 21:54:21 -07004658 DP_PRINT_STATS(" Low threshold intr = %d",
4659 pdev->stats.replenish.low_thresh_intrs);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304660 DP_PRINT_STATS("Dropped:");
4661 DP_PRINT_STATS(" msdu_not_done = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304662 pdev->stats.dropped.msdu_not_done);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304663 DP_PRINT_STATS("Sent To Stack:");
4664 DP_PRINT_STATS(" Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304665 pdev->stats.rx.to_stack.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304666 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304667 pdev->stats.rx.to_stack.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304668 DP_PRINT_STATS("Multicast/Broadcast:");
4669 DP_PRINT_STATS(" Packets = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304670 pdev->stats.rx.multicast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304671 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05304672 pdev->stats.rx.multicast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304673 DP_PRINT_STATS("Errors:");
4674 DP_PRINT_STATS(" Rxdma Ring Un-inititalized = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304675 pdev->stats.replenish.rxdma_err);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304676 DP_PRINT_STATS(" Desc Alloc Failed: = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304677 pdev->stats.err.desc_alloc_fail);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05304678
4679 /* Get bar_recv_cnt */
4680 dp_aggregate_pdev_ctrl_frames_stats(pdev);
4681 DP_PRINT_STATS("BAR Received Count: = %d",
4682 pdev->stats.rx.bar_recv_cnt);
4683
Ishank Jain1e7401c2017-02-17 15:38:39 +05304684}
4685
4686/**
4687 * dp_print_soc_tx_stats(): Print SOC level stats
4688 * @soc DP_SOC Handle
4689 *
4690 * Return: void
4691 */
4692static inline void
4693dp_print_soc_tx_stats(struct dp_soc *soc)
4694{
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304695 DP_PRINT_STATS("SOC Tx Stats:\n");
4696 DP_PRINT_STATS("Tx Descriptors In Use = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304697 soc->stats.tx.desc_in_use);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304698 DP_PRINT_STATS("Invalid peer:");
4699 DP_PRINT_STATS(" Packets = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05304700 soc->stats.tx.tx_invalid_peer.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05304701 DP_PRINT_STATS(" Bytes = %llu",
Ishank Jaine73c4032017-03-16 11:48:15 +05304702 soc->stats.tx.tx_invalid_peer.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304703 DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05304704 soc->stats.tx.tcl_ring_full[0],
4705 soc->stats.tx.tcl_ring_full[1],
4706 soc->stats.tx.tcl_ring_full[2]);
4707
Ishank Jain1e7401c2017-02-17 15:38:39 +05304708}
4709
4710
4711/**
4712 * dp_print_soc_rx_stats: Print SOC level Rx stats
4713 * @soc: DP_SOC Handle
4714 *
4715 * Return:void
4716 */
4717static inline void
4718dp_print_soc_rx_stats(struct dp_soc *soc)
4719{
4720 uint32_t i;
4721 char reo_error[DP_REO_ERR_LENGTH];
4722 char rxdma_error[DP_RXDMA_ERR_LENGTH];
4723 uint8_t index = 0;
4724
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304725 DP_PRINT_STATS("SOC Rx Stats:\n");
4726 DP_PRINT_STATS("Errors:\n");
4727 DP_PRINT_STATS("Rx Decrypt Errors = %d",
Pamidipati, Vijayc2cb4272017-05-23 10:09:26 +05304728 (soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] +
4729 soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC]));
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304730 DP_PRINT_STATS("Invalid RBM = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304731 soc->stats.rx.err.invalid_rbm);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304732 DP_PRINT_STATS("Invalid Vdev = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304733 soc->stats.rx.err.invalid_vdev);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304734 DP_PRINT_STATS("Invalid Pdev = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304735 soc->stats.rx.err.invalid_pdev);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304736 DP_PRINT_STATS("Invalid Peer = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05304737 soc->stats.rx.err.rx_invalid_peer.num);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304738 DP_PRINT_STATS("HAL Ring Access Fail = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304739 soc->stats.rx.err.hal_ring_access_fail);
Pamidipati, Vijayc2cb4272017-05-23 10:09:26 +05304740
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304741 for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
Ishank Jain1e7401c2017-02-17 15:38:39 +05304742 index += qdf_snprint(&rxdma_error[index],
4743 DP_RXDMA_ERR_LENGTH - index,
Ishank Jain57c42a12017-04-12 10:42:22 +05304744 " %d", soc->stats.rx.err.rxdma_error[i]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05304745 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304746 DP_PRINT_STATS("RXDMA Error (0-31):%s",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304747 rxdma_error);
4748
4749 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304750 for (i = 0; i < HAL_REO_ERR_MAX; i++) {
Ishank Jain1e7401c2017-02-17 15:38:39 +05304751 index += qdf_snprint(&reo_error[index],
4752 DP_REO_ERR_LENGTH - index,
Ishank Jain57c42a12017-04-12 10:42:22 +05304753 " %d", soc->stats.rx.err.reo_error[i]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05304754 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304755 DP_PRINT_STATS("REO Error(0-14):%s",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304756 reo_error);
4757}
4758
sumedh baikady72b1c712017-08-24 12:11:46 -07004759
4760/**
4761 * dp_print_ring_stat_from_hal(): Print hal level ring stats
4762 * @soc: DP_SOC handle
4763 * @srng: DP_SRNG handle
4764 * @ring_name: SRNG name
4765 *
4766 * Return: void
4767 */
4768static inline void
4769dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng,
4770 char *ring_name)
4771{
4772 uint32_t tailp;
4773 uint32_t headp;
4774
4775 if (srng->hal_srng != NULL) {
4776 hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp);
4777 DP_PRINT_STATS("%s : Head pointer = %d Tail Pointer = %d\n",
4778 ring_name, headp, tailp);
4779 }
4780}
4781
4782/**
4783 * dp_print_ring_stats(): Print tail and head pointer
4784 * @pdev: DP_PDEV handle
4785 *
4786 * Return:void
4787 */
4788static inline void
4789dp_print_ring_stats(struct dp_pdev *pdev)
4790{
4791 uint32_t i;
4792 char ring_name[STR_MAXLEN + 1];
4793
4794 dp_print_ring_stat_from_hal(pdev->soc,
4795 &pdev->soc->reo_exception_ring,
4796 "Reo Exception Ring");
4797 dp_print_ring_stat_from_hal(pdev->soc,
4798 &pdev->soc->reo_reinject_ring,
4799 "Reo Inject Ring");
4800 dp_print_ring_stat_from_hal(pdev->soc,
4801 &pdev->soc->reo_cmd_ring,
4802 "Reo Command Ring");
4803 dp_print_ring_stat_from_hal(pdev->soc,
4804 &pdev->soc->reo_status_ring,
4805 "Reo Status Ring");
4806 dp_print_ring_stat_from_hal(pdev->soc,
4807 &pdev->soc->rx_rel_ring,
4808 "Rx Release ring");
4809 dp_print_ring_stat_from_hal(pdev->soc,
4810 &pdev->soc->tcl_cmd_ring,
4811 "Tcl command Ring");
4812 dp_print_ring_stat_from_hal(pdev->soc,
4813 &pdev->soc->tcl_status_ring,
4814 "Tcl Status Ring");
4815 dp_print_ring_stat_from_hal(pdev->soc,
4816 &pdev->soc->wbm_desc_rel_ring,
4817 "Wbm Desc Rel Ring");
4818 for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
4819 snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i);
4820 dp_print_ring_stat_from_hal(pdev->soc,
4821 &pdev->soc->reo_dest_ring[i],
4822 ring_name);
4823 }
4824 for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
4825 snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i);
4826 dp_print_ring_stat_from_hal(pdev->soc,
4827 &pdev->soc->tcl_data_ring[i],
4828 ring_name);
4829 }
4830 for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
4831 snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i);
4832 dp_print_ring_stat_from_hal(pdev->soc,
4833 &pdev->soc->tx_comp_ring[i],
4834 ring_name);
4835 }
4836 dp_print_ring_stat_from_hal(pdev->soc,
4837 &pdev->rx_refill_buf_ring,
4838 "Rx Refill Buf Ring");
4839
sumedh baikady72b1c712017-08-24 12:11:46 -07004840 dp_print_ring_stat_from_hal(pdev->soc,
Yun Park601d0d82017-08-28 21:49:31 -07004841 &pdev->rx_refill_buf_ring2,
4842 "Second Rx Refill Buf Ring");
sumedh baikady72b1c712017-08-24 12:11:46 -07004843
4844 dp_print_ring_stat_from_hal(pdev->soc,
4845 &pdev->rxdma_mon_buf_ring,
4846 "Rxdma Mon Buf Ring");
4847 dp_print_ring_stat_from_hal(pdev->soc,
4848 &pdev->rxdma_mon_dst_ring,
4849 "Rxdma Mon Dst Ring");
4850 dp_print_ring_stat_from_hal(pdev->soc,
4851 &pdev->rxdma_mon_status_ring,
4852 "Rxdma Mon Status Ring");
4853 dp_print_ring_stat_from_hal(pdev->soc,
4854 &pdev->rxdma_mon_desc_ring,
4855 "Rxdma mon desc Ring");
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08004856
4857 for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
4858 snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", i);
4859 dp_print_ring_stat_from_hal(pdev->soc,
4860 &pdev->rxdma_err_dst_ring[i],
4861 ring_name);
4862 }
4863
sumedh baikady72b1c712017-08-24 12:11:46 -07004864 for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
4865 snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i);
4866 dp_print_ring_stat_from_hal(pdev->soc,
4867 &pdev->rx_mac_buf_ring[i],
4868 ring_name);
4869 }
4870}
4871
Ishank Jain1e7401c2017-02-17 15:38:39 +05304872/**
4873 * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
4874 * @vdev: DP_VDEV handle
4875 *
4876 * Return:void
4877 */
4878static inline void
4879dp_txrx_host_stats_clr(struct dp_vdev *vdev)
4880{
4881 struct dp_peer *peer = NULL;
Anish Nataraj28490c42018-01-19 19:34:54 +05304882 struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
4883
Ishank Jain1e7401c2017-02-17 15:38:39 +05304884 DP_STATS_CLR(vdev->pdev);
4885 DP_STATS_CLR(vdev->pdev->soc);
4886 DP_STATS_CLR(vdev);
4887 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4888 if (!peer)
4889 return;
4890 DP_STATS_CLR(peer);
Anish Nataraj28490c42018-01-19 19:34:54 +05304891
4892 if (soc->cdp_soc.ol_ops->update_dp_stats) {
4893 soc->cdp_soc.ol_ops->update_dp_stats(
4894 vdev->pdev->osif_pdev,
4895 &peer->stats,
4896 peer->peer_ids[0],
4897 UPDATE_PEER_STATS);
4898 }
4899
Ishank Jain1e7401c2017-02-17 15:38:39 +05304900 }
4901
Anish Nataraj28490c42018-01-19 19:34:54 +05304902 if (soc->cdp_soc.ol_ops->update_dp_stats)
4903 soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
4904 &vdev->stats, (uint16_t)vdev->vdev_id,
4905 UPDATE_VDEV_STATS);
Ishank Jain1e7401c2017-02-17 15:38:39 +05304906}
4907
4908/**
4909 * dp_print_rx_rates(): Print Rx rate stats
4910 * @vdev: DP_VDEV handle
4911 *
4912 * Return:void
4913 */
4914static inline void
4915dp_print_rx_rates(struct dp_vdev *vdev)
4916{
4917 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304918 uint8_t i, mcs, pkt_type;
Ishank Jain1e7401c2017-02-17 15:38:39 +05304919 uint8_t index = 0;
Ishank Jain1e7401c2017-02-17 15:38:39 +05304920 char nss[DP_NSS_LENGTH];
4921
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304922 DP_PRINT_STATS("Rx Rate Info:\n");
Ishank Jain1e7401c2017-02-17 15:38:39 +05304923
Ishank Jain57c42a12017-04-12 10:42:22 +05304924 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
4925 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304926 for (mcs = 0; mcs < MAX_MCS; mcs++) {
4927 if (!dp_rate_string[pkt_type][mcs].valid)
4928 continue;
4929
4930 DP_PRINT_STATS(" %s = %d",
4931 dp_rate_string[pkt_type][mcs].mcs_type,
Ishank Jain57c42a12017-04-12 10:42:22 +05304932 pdev->stats.rx.pkt_type[pkt_type].
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304933 mcs_count[mcs]);
Ishank Jain57c42a12017-04-12 10:42:22 +05304934 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304935
4936 DP_PRINT_STATS("\n");
Ishank Jain57c42a12017-04-12 10:42:22 +05304937 }
4938
Ishank Jain1e7401c2017-02-17 15:38:39 +05304939 index = 0;
4940 for (i = 0; i < SS_COUNT; i++) {
4941 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
Ishank Jain57c42a12017-04-12 10:42:22 +05304942 " %d", pdev->stats.rx.nss[i]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05304943 }
Anish Nataraj072d8972018-01-09 18:23:33 +05304944 DP_PRINT_STATS("NSS(1-8) = %s",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304945 nss);
4946
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304947 DP_PRINT_STATS("SGI ="
Ishank Jain1e7401c2017-02-17 15:38:39 +05304948 " 0.8us %d,"
4949 " 0.4us %d,"
4950 " 1.6us %d,"
4951 " 3.2us %d,",
4952 pdev->stats.rx.sgi_count[0],
4953 pdev->stats.rx.sgi_count[1],
4954 pdev->stats.rx.sgi_count[2],
4955 pdev->stats.rx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304956 DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304957 pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
4958 pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304959 DP_PRINT_STATS("Reception Type ="
Ishank Jain1e7401c2017-02-17 15:38:39 +05304960 " SU: %d,"
4961 " MU_MIMO:%d,"
4962 " MU_OFDMA:%d,"
Ishank Jain57c42a12017-04-12 10:42:22 +05304963 " MU_OFDMA_MIMO:%d\n",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304964 pdev->stats.rx.reception_type[0],
4965 pdev->stats.rx.reception_type[1],
4966 pdev->stats.rx.reception_type[2],
4967 pdev->stats.rx.reception_type[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304968 DP_PRINT_STATS("Aggregation:\n");
4969 DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304970 pdev->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304971 DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304972 pdev->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304973 DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304974 pdev->stats.rx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304975 DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05304976 pdev->stats.rx.non_amsdu_cnt);
4977}
4978
4979/**
4980 * dp_print_tx_rates(): Print tx rates
4981 * @vdev: DP_VDEV handle
4982 *
4983 * Return:void
4984 */
4985static inline void
4986dp_print_tx_rates(struct dp_vdev *vdev)
4987{
4988 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304989 uint8_t mcs, pkt_type;
Ishank Jain1e7401c2017-02-17 15:38:39 +05304990 uint32_t index;
4991
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304992 DP_PRINT_STATS("Tx Rate Info:\n");
Ishank Jain1e7401c2017-02-17 15:38:39 +05304993
4994 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
4995 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05304996 for (mcs = 0; mcs < MAX_MCS; mcs++) {
4997 if (!dp_rate_string[pkt_type][mcs].valid)
4998 continue;
4999
5000 DP_PRINT_STATS(" %s = %d",
5001 dp_rate_string[pkt_type][mcs].mcs_type,
Ishank Jain1e7401c2017-02-17 15:38:39 +05305002 pdev->stats.tx.pkt_type[pkt_type].
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305003 mcs_count[mcs]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305004 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305005
5006 DP_PRINT_STATS("\n");
Ishank Jain1e7401c2017-02-17 15:38:39 +05305007 }
5008
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305009 DP_PRINT_STATS("SGI ="
Ishank Jain57c42a12017-04-12 10:42:22 +05305010 " 0.8us %d"
5011 " 0.4us %d"
5012 " 1.6us %d"
5013 " 3.2us %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305014 pdev->stats.tx.sgi_count[0],
5015 pdev->stats.tx.sgi_count[1],
5016 pdev->stats.tx.sgi_count[2],
5017 pdev->stats.tx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305018
5019 DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305020 pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
5021 pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305022
5023 DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma);
5024 DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc);
5025 DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc);
5026 DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries);
5027 DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
5028
5029 DP_PRINT_STATS("Aggregation:\n");
5030 DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305031 pdev->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305032 DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305033 pdev->stats.tx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305034}
5035
5036/**
5037 * dp_print_peer_stats():print peer stats
5038 * @peer: DP_PEER handle
5039 *
5040 * return void
5041 */
5042static inline void dp_print_peer_stats(struct dp_peer *peer)
5043{
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305044 uint8_t i, mcs, pkt_type;
Ishank Jain1e7401c2017-02-17 15:38:39 +05305045 uint32_t index;
5046 char nss[DP_NSS_LENGTH];
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305047 DP_PRINT_STATS("Node Tx Stats:\n");
5048 DP_PRINT_STATS("Total Packet Completions = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305049 peer->stats.tx.comp_pkt.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305050 DP_PRINT_STATS("Total Bytes Completions = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305051 peer->stats.tx.comp_pkt.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305052 DP_PRINT_STATS("Success Packets = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305053 peer->stats.tx.tx_success.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305054 DP_PRINT_STATS("Success Bytes = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305055 peer->stats.tx.tx_success.bytes);
Pranita Solankefc2ff392017-12-15 19:25:13 +05305056 DP_PRINT_STATS("Unicast Success Packets = %d",
5057 peer->stats.tx.ucast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305058 DP_PRINT_STATS("Unicast Success Bytes = %llu",
Pranita Solankefc2ff392017-12-15 19:25:13 +05305059 peer->stats.tx.ucast.bytes);
5060 DP_PRINT_STATS("Multicast Success Packets = %d",
5061 peer->stats.tx.mcast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305062 DP_PRINT_STATS("Multicast Success Bytes = %llu",
Pranita Solankefc2ff392017-12-15 19:25:13 +05305063 peer->stats.tx.mcast.bytes);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05305064 DP_PRINT_STATS("Broadcast Success Packets = %d",
5065 peer->stats.tx.bcast.num);
5066 DP_PRINT_STATS("Broadcast Success Bytes = %llu",
5067 peer->stats.tx.bcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305068 DP_PRINT_STATS("Packets Failed = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305069 peer->stats.tx.tx_failed);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305070 DP_PRINT_STATS("Packets In OFDMA = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305071 peer->stats.tx.ofdma);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305072 DP_PRINT_STATS("Packets In STBC = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305073 peer->stats.tx.stbc);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305074 DP_PRINT_STATS("Packets In LDPC = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305075 peer->stats.tx.ldpc);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305076 DP_PRINT_STATS("Packet Retries = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305077 peer->stats.tx.retries);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305078 DP_PRINT_STATS("MSDU's Part of AMSDU = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305079 peer->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305080 DP_PRINT_STATS("Last Packet RSSI = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305081 peer->stats.tx.last_ack_rssi);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305082 DP_PRINT_STATS("Dropped At FW: Removed = %d",
5083 peer->stats.tx.dropped.fw_rem);
5084 DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d",
5085 peer->stats.tx.dropped.fw_rem_tx);
5086 DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d",
5087 peer->stats.tx.dropped.fw_rem_notx);
5088 DP_PRINT_STATS("Dropped : Age Out = %d",
5089 peer->stats.tx.dropped.age_out);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05305090 DP_PRINT_STATS("NAWDS : ");
5091 DP_PRINT_STATS(" Nawds multicast Drop Tx Packet = %d",
5092 peer->stats.tx.nawds_mcast_drop);
5093 DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d",
5094 peer->stats.tx.nawds_mcast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305095 DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %llu",
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05305096 peer->stats.tx.nawds_mcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305097
5098 DP_PRINT_STATS("Rate Info:");
Ishank Jain1e7401c2017-02-17 15:38:39 +05305099
5100 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5101 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305102 for (mcs = 0; mcs < MAX_MCS; mcs++) {
5103 if (!dp_rate_string[pkt_type][mcs].valid)
5104 continue;
5105
5106 DP_PRINT_STATS(" %s = %d",
5107 dp_rate_string[pkt_type][mcs].mcs_type,
Ishank Jain1e7401c2017-02-17 15:38:39 +05305108 peer->stats.tx.pkt_type[pkt_type].
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305109 mcs_count[mcs]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305110 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305111
5112 DP_PRINT_STATS("\n");
Ishank Jain1e7401c2017-02-17 15:38:39 +05305113 }
5114
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305115 DP_PRINT_STATS("SGI = "
Ishank Jain57c42a12017-04-12 10:42:22 +05305116 " 0.8us %d"
5117 " 0.4us %d"
5118 " 1.6us %d"
5119 " 3.2us %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305120 peer->stats.tx.sgi_count[0],
5121 peer->stats.tx.sgi_count[1],
5122 peer->stats.tx.sgi_count[2],
5123 peer->stats.tx.sgi_count[3]);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05305124 DP_PRINT_STATS("Excess Retries per AC ");
5125 DP_PRINT_STATS(" Best effort = %d",
5126 peer->stats.tx.excess_retries_per_ac[0]);
5127 DP_PRINT_STATS(" Background= %d",
5128 peer->stats.tx.excess_retries_per_ac[1]);
5129 DP_PRINT_STATS(" Video = %d",
5130 peer->stats.tx.excess_retries_per_ac[2]);
5131 DP_PRINT_STATS(" Voice = %d",
5132 peer->stats.tx.excess_retries_per_ac[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305133 DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
Pranita Solanked7e10ba2017-12-13 15:40:38 +05305134 peer->stats.tx.bw[2], peer->stats.tx.bw[3],
5135 peer->stats.tx.bw[4], peer->stats.tx.bw[5]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305136
Pranita Solankeed0aba62018-01-12 19:14:31 +05305137 index = 0;
5138 for (i = 0; i < SS_COUNT; i++) {
5139 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
5140 " %d", peer->stats.tx.nss[i]);
5141 }
5142 DP_PRINT_STATS("NSS(1-8) = %s",
5143 nss);
5144
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305145 DP_PRINT_STATS("Aggregation:");
5146 DP_PRINT_STATS(" Number of Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305147 peer->stats.tx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305148 DP_PRINT_STATS(" Number of Msdu's With No Msdu Level Aggregation = %d\n",
Ishank Jaine73c4032017-03-16 11:48:15 +05305149 peer->stats.tx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305150
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305151 DP_PRINT_STATS("Node Rx Stats:");
5152 DP_PRINT_STATS("Packets Sent To Stack = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305153 peer->stats.rx.to_stack.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305154 DP_PRINT_STATS("Bytes Sent To Stack = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305155 peer->stats.rx.to_stack.bytes);
Ishank Jain57c42a12017-04-12 10:42:22 +05305156 for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
Pranita Solankefc2ff392017-12-15 19:25:13 +05305157 DP_PRINT_STATS("Ring Id = %d", i);
5158 DP_PRINT_STATS(" Packets Received = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05305159 peer->stats.rx.rcvd_reo[i].num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305160 DP_PRINT_STATS(" Bytes Received = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05305161 peer->stats.rx.rcvd_reo[i].bytes);
5162 }
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305163 DP_PRINT_STATS("Multicast Packets Received = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305164 peer->stats.rx.multicast.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305165 DP_PRINT_STATS("Multicast Bytes Received = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305166 peer->stats.rx.multicast.bytes);
Pranita Solankea5a3ae72018-01-18 21:45:27 +05305167 DP_PRINT_STATS("Broadcast Packets Received = %d",
5168 peer->stats.rx.bcast.num);
5169 DP_PRINT_STATS("Broadcast Bytes Received = %llu",
5170 peer->stats.rx.bcast.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305171 DP_PRINT_STATS("WDS Packets Received = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305172 peer->stats.rx.wds.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305173 DP_PRINT_STATS("WDS Bytes Received = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305174 peer->stats.rx.wds.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305175 DP_PRINT_STATS("Intra BSS Packets Received = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05305176 peer->stats.rx.intra_bss.pkts.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305177 DP_PRINT_STATS("Intra BSS Bytes Received = %llu",
Ishank Jain57c42a12017-04-12 10:42:22 +05305178 peer->stats.rx.intra_bss.pkts.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305179 DP_PRINT_STATS("Raw Packets Received = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305180 peer->stats.rx.raw.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305181 DP_PRINT_STATS("Raw Bytes Received = %llu",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305182 peer->stats.rx.raw.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305183 DP_PRINT_STATS("Errors: MIC Errors = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305184 peer->stats.rx.err.mic_err);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305185 DP_PRINT_STATS("Erros: Decryption Errors = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305186 peer->stats.rx.err.decrypt_err);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305187 DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305188 peer->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305189 DP_PRINT_STATS("Msdu's Recived As Ampdu = %d",
Ishank Jain57c42a12017-04-12 10:42:22 +05305190 peer->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305191 DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305192 peer->stats.rx.non_amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305193 DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305194 peer->stats.rx.amsdu_cnt);
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05305195 DP_PRINT_STATS("NAWDS : ");
5196 DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d",
5197 peer->stats.rx.nawds_mcast_drop.num);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305198 DP_PRINT_STATS(" Nawds multicast Drop Rx Packet Bytes = %llu",
Ruchi, Agrawalbd894b32017-11-03 17:24:56 +05305199 peer->stats.rx.nawds_mcast_drop.bytes);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305200 DP_PRINT_STATS("SGI ="
Ishank Jain57c42a12017-04-12 10:42:22 +05305201 " 0.8us %d"
5202 " 0.4us %d"
5203 " 1.6us %d"
5204 " 3.2us %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305205 peer->stats.rx.sgi_count[0],
5206 peer->stats.rx.sgi_count[1],
5207 peer->stats.rx.sgi_count[2],
5208 peer->stats.rx.sgi_count[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305209 DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305210 peer->stats.rx.bw[0], peer->stats.rx.bw[1],
5211 peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305212 DP_PRINT_STATS("Reception Type ="
Ishank Jain1e7401c2017-02-17 15:38:39 +05305213 " SU %d,"
5214 " MU_MIMO %d,"
5215 " MU_OFDMA %d,"
5216 " MU_OFDMA_MIMO %d",
5217 peer->stats.rx.reception_type[0],
5218 peer->stats.rx.reception_type[1],
5219 peer->stats.rx.reception_type[2],
5220 peer->stats.rx.reception_type[3]);
5221
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305222
Ishank Jain57c42a12017-04-12 10:42:22 +05305223 for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
5224 index = 0;
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305225 for (mcs = 0; mcs < MAX_MCS; mcs++) {
5226 if (!dp_rate_string[pkt_type][mcs].valid)
5227 continue;
Ishank Jain57c42a12017-04-12 10:42:22 +05305228
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305229 DP_PRINT_STATS(" %s = %d",
5230 dp_rate_string[pkt_type][mcs].mcs_type,
5231 peer->stats.rx.pkt_type[pkt_type].
5232 mcs_count[mcs]);
5233 }
5234
5235 DP_PRINT_STATS("\n");
5236 }
Ishank Jain1e7401c2017-02-17 15:38:39 +05305237
5238 index = 0;
5239 for (i = 0; i < SS_COUNT; i++) {
5240 index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
Ishank Jain57c42a12017-04-12 10:42:22 +05305241 " %d", peer->stats.rx.nss[i]);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305242 }
Anish Nataraj072d8972018-01-09 18:23:33 +05305243 DP_PRINT_STATS("NSS(1-8) = %s",
Ishank Jain1e7401c2017-02-17 15:38:39 +05305244 nss);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305245
5246 DP_PRINT_STATS("Aggregation:");
5247 DP_PRINT_STATS(" Msdu's Part of Ampdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305248 peer->stats.rx.ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305249 DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305250 peer->stats.rx.non_ampdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305251 DP_PRINT_STATS(" Msdu's Part of Amsdu = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305252 peer->stats.rx.amsdu_cnt);
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305253 DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d",
Ishank Jaine73c4032017-03-16 11:48:15 +05305254 peer->stats.rx.non_amsdu_cnt);
Ishank Jain1e7401c2017-02-17 15:38:39 +05305255}
5256
5257/**
5258 * dp_print_host_stats()- Function to print the stats aggregated at host
5259 * @vdev_handle: DP_VDEV handle
Ishank Jain1e7401c2017-02-17 15:38:39 +05305260 * @type: host stats type
5261 *
5262 * Available Stat types
Ishank Jain6290a3c2017-03-21 10:49:39 +05305263 * TXRX_CLEAR_STATS : Clear the stats
Ishank Jain1e7401c2017-02-17 15:38:39 +05305264 * TXRX_RX_RATE_STATS: Print Rx Rate Info
5265 * TXRX_TX_RATE_STATS: Print Tx Rate Info
5266 * TXRX_TX_HOST_STATS: Print Tx Stats
5267 * TXRX_RX_HOST_STATS: Print Rx Stats
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05305268 * TXRX_AST_STATS: Print AST Stats
sumedh baikady72b1c712017-08-24 12:11:46 -07005269 * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats
Ishank Jain1e7401c2017-02-17 15:38:39 +05305270 *
5271 * Return: 0 on success, print error message in case of failure
5272 */
5273static int
Ishank Jain6290a3c2017-03-21 10:49:39 +05305274dp_print_host_stats(struct cdp_vdev *vdev_handle, enum cdp_host_txrx_stats type)
Ishank Jain1e7401c2017-02-17 15:38:39 +05305275{
5276 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5277 struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
5278
5279 dp_aggregate_pdev_stats(pdev);
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05305280
Ishank Jain1e7401c2017-02-17 15:38:39 +05305281 switch (type) {
Ishank Jain6290a3c2017-03-21 10:49:39 +05305282 case TXRX_CLEAR_STATS:
5283 dp_txrx_host_stats_clr(vdev);
5284 break;
Ishank Jain1e7401c2017-02-17 15:38:39 +05305285 case TXRX_RX_RATE_STATS:
5286 dp_print_rx_rates(vdev);
5287 break;
5288 case TXRX_TX_RATE_STATS:
5289 dp_print_tx_rates(vdev);
5290 break;
5291 case TXRX_TX_HOST_STATS:
5292 dp_print_pdev_tx_stats(pdev);
5293 dp_print_soc_tx_stats(pdev->soc);
5294 break;
5295 case TXRX_RX_HOST_STATS:
5296 dp_print_pdev_rx_stats(pdev);
5297 dp_print_soc_rx_stats(pdev->soc);
5298 break;
Pamidipati, Vijay899e7752017-07-25 22:09:28 +05305299 case TXRX_AST_STATS:
5300 dp_print_ast_stats(pdev->soc);
5301 break;
sumedh baikady72b1c712017-08-24 12:11:46 -07005302 case TXRX_SRNG_PTR_STATS:
5303 dp_print_ring_stats(pdev);
5304 break;
Ishank Jain1e7401c2017-02-17 15:38:39 +05305305 default:
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005306 DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
Ishank Jain1e7401c2017-02-17 15:38:39 +05305307 break;
5308 }
5309 return 0;
5310}
5311
5312/*
Ishank Jain6290a3c2017-03-21 10:49:39 +05305313 * dp_get_host_peer_stats()- function to print peer stats
Ishank Jain1e7401c2017-02-17 15:38:39 +05305314 * @pdev_handle: DP_PDEV handle
5315 * @mac_addr: mac address of the peer
5316 *
5317 * Return: void
5318 */
5319static void
Ishank Jain6290a3c2017-03-21 10:49:39 +05305320dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
Ishank Jain1e7401c2017-02-17 15:38:39 +05305321{
5322 struct dp_peer *peer;
5323 uint8_t local_id;
5324 peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
5325 &local_id);
5326
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07005327 if (!peer) {
5328 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5329 "%s: Invalid peer\n", __func__);
5330 return;
5331 }
5332
Ishank Jain6290a3c2017-03-21 10:49:39 +05305333 dp_print_peer_stats(peer);
Pratik Gandhi51b6b6d2017-09-18 15:02:43 +05305334 dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
Ishank Jain6290a3c2017-03-21 10:49:39 +05305335 return;
Ishank Jain1e7401c2017-02-17 15:38:39 +05305336}
Ishank Jain6290a3c2017-03-21 10:49:39 +05305337
5338/*
Soumya Bhat7422db82017-12-15 13:48:53 +05305339 * dp_ppdu_ring_reset()- Reset PPDU Stats ring
5340 * @pdev: DP_PDEV handle
5341 *
5342 * Return: void
5343 */
5344static void
5345dp_ppdu_ring_reset(struct dp_pdev *pdev)
5346{
5347 struct htt_rx_ring_tlv_filter htt_tlv_filter;
5348
5349 qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0);
5350
5351 htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, pdev->pdev_id,
5352 pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
5353 RX_BUFFER_SIZE, &htt_tlv_filter);
5354
5355}
5356
5357/*
Anish Nataraj38a29562017-08-18 19:41:17 +05305358 * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
5359 * @pdev: DP_PDEV handle
5360 *
5361 * Return: void
5362 */
5363static void
5364dp_ppdu_ring_cfg(struct dp_pdev *pdev)
5365{
5366 struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
5367
5368 htt_tlv_filter.mpdu_start = 0;
5369 htt_tlv_filter.msdu_start = 0;
5370 htt_tlv_filter.packet = 0;
5371 htt_tlv_filter.msdu_end = 0;
5372 htt_tlv_filter.mpdu_end = 0;
5373 htt_tlv_filter.packet_header = 1;
5374 htt_tlv_filter.attention = 1;
5375 htt_tlv_filter.ppdu_start = 1;
5376 htt_tlv_filter.ppdu_end = 1;
5377 htt_tlv_filter.ppdu_end_user_stats = 1;
5378 htt_tlv_filter.ppdu_end_user_stats_ext = 1;
5379 htt_tlv_filter.ppdu_end_status_done = 1;
5380 htt_tlv_filter.enable_fp = 1;
5381 htt_tlv_filter.enable_md = 0;
5382 htt_tlv_filter.enable_mo = 0;
nobeljd124b742017-10-16 11:59:12 -07005383 htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
5384 htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
5385 htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
5386 htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
5387 htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
5388 htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
Anish Nataraj38a29562017-08-18 19:41:17 +05305389
5390 htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, pdev->pdev_id,
5391 pdev->rxdma_mon_status_ring.hal_srng, RXDMA_MONITOR_STATUS,
5392 RX_BUFFER_SIZE, &htt_tlv_filter);
5393}
5394
/*
 * dp_config_debug_sniffer()- API to enable/disable debug sniffer
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value: 0 - disable, 1 - tx sniffer, 2 - m_copy mode
 *
 * Switches between the mutually exclusive tx-sniffer and m_copy modes and
 * keeps the FW PPDU-stats subscription in sync with the other consumers
 * (pktlog, enhanced stats) that share it.
 *
 * Return: void
 */
static void
dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	switch (val) {
	case 0:
		/* Disable both modes */
		pdev->tx_sniffer_enable = 0;
		pdev->mcopy_mode = 0;

		/* Only unsubscribe from FW PPDU stats and reset the status
		 * ring when no other consumer needs them; if enhanced stats
		 * are still on, downgrade the FW config to ENH_STATS only.
		 */
		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en) {
			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
			dp_ppdu_ring_reset(pdev);
		} else if (pdev->enhanced_stats_en) {
			dp_h2t_cfg_stats_msg_send(pdev,
				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
		}
		break;

	case 1:
		/* Tx sniffer on; m_copy off (modes are exclusive) */
		pdev->tx_sniffer_enable = 1;
		pdev->mcopy_mode = 0;

		/* pktlog already keeps the FW PPDU stream enabled */
		if (!pdev->pktlog_ppdu_stats)
			dp_h2t_cfg_stats_msg_send(pdev,
				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
		break;
	case 2:
		/* m_copy on; tx sniffer off */
		pdev->mcopy_mode = 1;
		pdev->tx_sniffer_enable = 0;
		/* enhanced stats would already have configured the ring */
		if (!pdev->enhanced_stats_en)
			dp_ppdu_ring_cfg(pdev);

		if (!pdev->pktlog_ppdu_stats)
			dp_h2t_cfg_stats_msg_send(pdev,
				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Invalid value\n");
		break;
	}
}
5445
5446/*
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05305447 * dp_enable_enhanced_stats()- API to enable enhanced statistcs
5448 * @pdev_handle: DP_PDEV handle
5449 *
5450 * Return: void
5451 */
5452static void
5453dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
5454{
5455 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5456 pdev->enhanced_stats_en = 1;
Anish Nataraj38a29562017-08-18 19:41:17 +05305457
Soumya Bhat7422db82017-12-15 13:48:53 +05305458 if (!pdev->mcopy_mode)
5459 dp_ppdu_ring_cfg(pdev);
5460
Soumya Bhat0d6245c2018-02-08 21:02:57 +05305461 if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode)
5462 dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05305463}
5464
5465/*
5466 * dp_disable_enhanced_stats()- API to disable enhanced statistcs
5467 * @pdev_handle: DP_PDEV handle
5468 *
5469 * Return: void
5470 */
5471static void
5472dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
5473{
5474 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05305475
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05305476 pdev->enhanced_stats_en = 0;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05305477
Soumya Bhat0d6245c2018-02-08 21:02:57 +05305478 if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode)
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07005479 dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
Soumya Bhat7422db82017-12-15 13:48:53 +05305480
5481 if (!pdev->mcopy_mode)
5482 dp_ppdu_ring_reset(pdev);
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05305483}
5484
5485/*
Ishank Jain6290a3c2017-03-21 10:49:39 +05305486 * dp_get_fw_peer_stats()- function to print peer stats
5487 * @pdev_handle: DP_PDEV handle
5488 * @mac_addr: mac address of the peer
5489 * @cap: Type of htt stats requested
5490 *
5491 * Currently Supporting only MAC ID based requests Only
5492 * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
5493 * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
5494 * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
5495 *
5496 * Return: void
5497 */
5498static void
5499dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
5500 uint32_t cap)
5501{
5502 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
Pamidipati, Vijayc2cf6692017-11-22 10:17:34 +05305503 int i;
Ishank Jain6290a3c2017-03-21 10:49:39 +05305504 uint32_t config_param0 = 0;
5505 uint32_t config_param1 = 0;
5506 uint32_t config_param2 = 0;
5507 uint32_t config_param3 = 0;
5508
5509 HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
5510 config_param0 |= (1 << (cap + 1));
5511
Pamidipati, Vijayc2cf6692017-11-22 10:17:34 +05305512 for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
5513 config_param1 |= (1 << i);
5514 }
Ishank Jain6290a3c2017-03-21 10:49:39 +05305515
5516 config_param2 |= (mac_addr[0] & 0x000000ff);
5517 config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
5518 config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
5519 config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
5520
5521 config_param3 |= (mac_addr[4] & 0x000000ff);
5522 config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
5523
5524 dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
5525 config_param0, config_param1, config_param2,
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05305526 config_param3, 0);
Karunakar Dasineni93f633c2017-06-02 19:04:46 -07005527
Ishank Jain6290a3c2017-03-21 10:49:39 +05305528}
5529
/* This struct definition will be removed from here
 * once it get added in FW headers*/
struct httstats_cmd_req {
	uint32_t config_param0;	/* HTT ext-stats config word 0 */
	uint32_t config_param1;	/* HTT ext-stats config word 1 */
	uint32_t config_param2;	/* HTT ext-stats config word 2 */
	uint32_t config_param3;	/* HTT ext-stats config word 3 */
	int cookie;		/* opaque value echoed back in the FW response */
	u_int8_t stats_id;	/* HTT_DBG_EXT_STATS_* type being requested */
};
5540
5541/*
5542 * dp_get_htt_stats: function to process the httstas request
5543 * @pdev_handle: DP pdev handle
5544 * @data: pointer to request data
5545 * @data_len: length for request data
5546 *
5547 * return: void
5548 */
5549static void
5550dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
5551{
5552 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5553 struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
5554
5555 QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
5556 dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
5557 req->config_param0, req->config_param1,
5558 req->config_param2, req->config_param3,
5559 req->cookie);
5560}
Ishank Jain9f174c62017-03-30 18:37:42 +05305561/*
Soumya Bhatcfbb8952017-10-03 15:04:09 +05305562 * dp_set_pdev_param: function to set parameters in pdev
5563 * @pdev_handle: DP pdev handle
5564 * @param: parameter type to be set
5565 * @val: value of parameter to be set
5566 *
5567 * return: void
5568 */
5569static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
5570 enum cdp_pdev_param_type param, uint8_t val)
5571{
5572 switch (param) {
Soumya Bhat6fee59c2017-10-31 13:12:37 +05305573 case CDP_CONFIG_DEBUG_SNIFFER:
5574 dp_config_debug_sniffer(pdev_handle, val);
Soumya Bhatcfbb8952017-10-03 15:04:09 +05305575 break;
5576 default:
5577 break;
5578 }
5579}
5580
/*
 * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Updates the requested per-vdev flag and then refreshes the tx search
 * flags, which depend on several of these settings.
 *
 * return: void
 */
static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
		enum cdp_vdev_param_type param, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	switch (param) {
	case CDP_ENABLE_WDS:
		vdev->wds_enabled = val;
		break;
	case CDP_ENABLE_NAWDS:
		vdev->nawds_enabled = val;
		break;
	case CDP_ENABLE_MCAST_EN:
		vdev->mcast_enhancement_en = val;
		break;
	case CDP_ENABLE_PROXYSTA:
		vdev->proxysta_vdev = val;
		break;
	case CDP_UPDATE_TDLS_FLAGS:
		vdev->tdls_link_connected = val;
		break;
	case CDP_CFG_WDS_AGING_TIMER:
		/* 0 stops AST aging entirely; a new non-zero period
		 * re-arms the shared soc-level aging timer.
		 */
		if (val == 0)
			qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer);
		else if (val != vdev->wds_aging_timer_val)
			qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val);

		vdev->wds_aging_timer_val = val;
		break;
	case CDP_ENABLE_AP_BRIDGE:
		/* AP bridging is never valid in STA mode */
		if (wlan_op_mode_sta != vdev->opmode)
			vdev->ap_bridge_enabled = val;
		else
			vdev->ap_bridge_enabled = false;
		break;
	case CDP_ENABLE_CIPHER:
		vdev->sec_type = val;
		break;
	case CDP_ENABLE_QWRAP_ISOLATION:
		vdev->isolation_vdev = val;
		break;
	default:
		break;
	}

	/* Several flags above feed into the tx descriptor search config */
	dp_tx_vdev_update_search_flags(vdev);
}
5634
5635/**
5636 * dp_peer_set_nawds: set nawds bit in peer
5637 * @peer_handle: pointer to peer
5638 * @value: enable/disable nawds
5639 *
5640 * return: void
5641 */
c_cgodavbd5b3c22017-06-07 12:31:40 +05305642static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
Ishank Jain9f174c62017-03-30 18:37:42 +05305643{
5644 struct dp_peer *peer = (struct dp_peer *)peer_handle;
5645 peer->nawds_enabled = value;
5646}
Ishank Jain1e7401c2017-02-17 15:38:39 +05305647
Ishank Jain949674c2017-02-27 17:09:29 +05305648/*
5649 * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
5650 * @vdev_handle: DP_VDEV handle
5651 * @map_id:ID of map that needs to be updated
5652 *
5653 * Return: void
5654 */
5655static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
5656 uint8_t map_id)
5657{
5658 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5659 vdev->dscp_tid_map_id = map_id;
5660 return;
5661}
5662
Prathyusha Guduri184b6402018-02-04 23:01:49 +05305663/*
5664 * dp_txrx_stats_publish(): publish pdev stats into a buffer
5665 * @pdev_handle: DP_PDEV handle
5666 * @buf: to hold pdev_stats
5667 *
5668 * Return: int
5669 */
5670static int
5671dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf)
5672{
5673 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5674 struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
5675
5676 dp_aggregate_pdev_stats(pdev);
5677
5678 qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
5679
5680 return TXRX_STATS_LEVEL;
5681}
5682
/**
 * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
 * @pdev: DP_PDEV handle
 * @map_id: ID of map that needs to be updated
 * @tos: index value in map (DSCP is extracted from the upper bits)
 * @tid: tid value passed by the user
 *
 * Return: void
 */
static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
		uint8_t map_id, uint8_t tos, uint8_t tid)
{
	uint8_t dscp;
	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
	/* NOTE(review): map_id indexes dscp_tid_map with no bound check
	 * here; only the HW update below is guarded. Confirm callers can
	 * never pass map_id beyond the soft-map table size.
	 */
	pdev->dscp_tid_map[map_id][dscp] = tid;
	/* Only the first HAL_MAX_HW_DSCP_TID_MAPS maps exist in hardware;
	 * higher IDs are host-only maps.
	 */
	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
			map_id, dscp);
	return;
}
5704
Ishank Jain6290a3c2017-03-21 10:49:39 +05305705/**
5706 * dp_fw_stats_process(): Process TxRX FW stats request
5707 * @vdev_handle: DP VDEV handle
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05305708 * @req: stats request
Ishank Jain6290a3c2017-03-21 10:49:39 +05305709 *
5710 * return: int
5711 */
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05305712static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
5713 struct cdp_txrx_stats_req *req)
Ishank Jain6290a3c2017-03-21 10:49:39 +05305714{
5715 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5716 struct dp_pdev *pdev = NULL;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05305717 uint32_t stats = req->stats;
Ishank Jain6290a3c2017-03-21 10:49:39 +05305718
5719 if (!vdev) {
5720 DP_TRACE(NONE, "VDEV not found");
5721 return 1;
5722 }
Ishank Jain6290a3c2017-03-21 10:49:39 +05305723 pdev = vdev->pdev;
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05305724
chenguocda25122018-01-24 17:39:38 +08005725 /*
5726 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
5727 * from param0 to param3 according to below rule:
5728 *
5729 * PARAM:
5730 * - config_param0 : start_offset (stats type)
5731 * - config_param1 : stats bmask from start offset
5732 * - config_param2 : stats bmask from start offset + 32
5733 * - config_param3 : stats bmask from start offset + 64
5734 */
5735 if (req->stats == CDP_TXRX_STATS_0) {
5736 req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
5737 req->param1 = 0xFFFFFFFF;
5738 req->param2 = 0xFFFFFFFF;
5739 req->param3 = 0xFFFFFFFF;
5740 }
5741
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05305742 return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05305743 req->param1, req->param2, req->param3, 0);
Ishank Jain6290a3c2017-03-21 10:49:39 +05305744}
5745
/**
 * dp_txrx_stats_request - function to map to firmware and host stats
 * @vdev: virtual handle
 * @req: stats request
 *
 * Routes a CDP stats request either to firmware (via HTT ext-stats) or
 * to the host-maintained stats printers, using dp_stats_mapping_table.
 *
 * Return: integer (0 on no-op/host path completion, or the FW send result)
 */
static int dp_txrx_stats_request(struct cdp_vdev *vdev,
		struct cdp_txrx_stats_req *req)
{
	int host_stats;
	int fw_stats;
	enum cdp_stats stats;

	if (!vdev || !req) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Invalid vdev/req instance");
		return 0;
	}
	stats = req->stats;
	if (stats >= CDP_TXRX_MAX_STATS)
		return 0;

	/*
	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
	 * has to be updated if new FW HTT stats added
	 */
	/* Re-index stats above the HTT range so they land on the correct
	 * row of dp_stats_mapping_table.
	 */
	if (stats > CDP_TXRX_STATS_HTT_MAX)
		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
	host_stats = dp_stats_mapping_table[stats][STATS_HOST];

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		 "stats: %u fw_stats_type: %d host_stats_type: %d",
		 stats, fw_stats, host_stats);

	if (fw_stats != TXRX_FW_STATS_INVALID) {
		/* update request with FW stats type */
		req->stats = fw_stats;
		return dp_fw_stats_process(vdev, req);
	}

	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
			(host_stats <= TXRX_HOST_STATS_MAX))
		return dp_print_host_stats(vdev, host_stats);
	else
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"Wrong Input for TxRx Stats");

	return 0;
}
5797
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05305798/**
5799 * dp_txrx_stats() - function to map to firmware and host stats
5800 * @vdev: virtual handle
5801 * @stats: type of statistics requested
5802 *
5803 * Return: integer
5804 */
5805static int dp_txrx_stats(struct cdp_vdev *vdev, enum cdp_stats stats)
5806{
5807 struct cdp_txrx_stats_req req = {0,};
5808
5809 req.stats = stats;
5810
5811 return dp_txrx_stats_request(vdev, &req);
5812}
5813
/*
 * dp_print_napi_stats(): NAPI stats
 * @soc - soc handle
 *
 * Thin wrapper: delegates to the HIF layer, which owns the NAPI
 * bookkeeping for this soc's hif handle.
 */
static void dp_print_napi_stats(struct dp_soc *soc)
{
	hif_print_napi_stats(soc->hif_handle);
}
5822
5823/*
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07005824 * dp_print_per_ring_stats(): Packet count per ring
5825 * @soc - soc handle
5826 */
5827static void dp_print_per_ring_stats(struct dp_soc *soc)
5828{
chenguo8107b662017-12-13 16:31:13 +08005829 uint8_t ring;
5830 uint16_t core;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07005831 uint64_t total_packets;
5832
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005833 DP_TRACE(FATAL, "Reo packets per ring:");
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07005834 for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
5835 total_packets = 0;
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005836 DP_TRACE(FATAL, "Packets on ring %u:", ring);
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07005837 for (core = 0; core < NR_CPUS; core++) {
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005838 DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07005839 core, soc->stats.rx.ring_packets[core][ring]);
5840 total_packets += soc->stats.rx.ring_packets[core][ring];
5841 }
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005842 DP_TRACE(FATAL, "Total packets on ring %u: %llu",
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07005843 ring, total_packets);
5844 }
5845}
5846
5847/*
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005848 * dp_txrx_path_stats() - Function to display dump stats
5849 * @soc - soc handle
5850 *
5851 * return: none
5852 */
5853static void dp_txrx_path_stats(struct dp_soc *soc)
5854{
5855 uint8_t error_code;
5856 uint8_t loop_pdev;
5857 struct dp_pdev *pdev;
Ishank Jain57c42a12017-04-12 10:42:22 +05305858 uint8_t i;
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005859
5860 for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
5861
5862 pdev = soc->pdev_list[loop_pdev];
5863 dp_aggregate_pdev_stats(pdev);
5864 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5865 "Tx path Statistics:");
5866
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305867 DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005868 pdev->stats.tx_i.rcvd.num,
5869 pdev->stats.tx_i.rcvd.bytes);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305870 DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005871 pdev->stats.tx_i.processed.num,
5872 pdev->stats.tx_i.processed.bytes);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305873 DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005874 pdev->stats.tx.tx_success.num,
5875 pdev->stats.tx.tx_success.bytes);
5876
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005877 DP_TRACE(FATAL, "Dropped in host:");
5878 DP_TRACE(FATAL, "Total packets dropped: %u,",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005879 pdev->stats.tx_i.dropped.dropped_pkt.num);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005880 DP_TRACE(FATAL, "Descriptor not available: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005881 pdev->stats.tx_i.dropped.desc_na);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005882 DP_TRACE(FATAL, "Ring full: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005883 pdev->stats.tx_i.dropped.ring_full);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005884 DP_TRACE(FATAL, "Enqueue fail: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005885 pdev->stats.tx_i.dropped.enqueue_fail);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005886 DP_TRACE(FATAL, "DMA Error: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005887 pdev->stats.tx_i.dropped.dma_error);
5888
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005889 DP_TRACE(FATAL, "Dropped in hardware:");
5890 DP_TRACE(FATAL, "total packets dropped: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005891 pdev->stats.tx.tx_failed);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005892 DP_TRACE(FATAL, "mpdu age out: %u",
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305893 pdev->stats.tx.dropped.age_out);
5894 DP_TRACE(FATAL, "firmware removed: %u",
5895 pdev->stats.tx.dropped.fw_rem);
5896 DP_TRACE(FATAL, "firmware removed tx: %u",
5897 pdev->stats.tx.dropped.fw_rem_tx);
5898 DP_TRACE(FATAL, "firmware removed notx %u",
5899 pdev->stats.tx.dropped.fw_rem_notx);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005900 DP_TRACE(FATAL, "peer_invalid: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005901 pdev->soc->stats.tx.tx_invalid_peer.num);
5902
5903
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005904 DP_TRACE(FATAL, "Tx packets sent per interrupt:");
5905 DP_TRACE(FATAL, "Single Packet: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005906 pdev->stats.tx_comp_histogram.pkts_1);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005907 DP_TRACE(FATAL, "2-20 Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005908 pdev->stats.tx_comp_histogram.pkts_2_20);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005909 DP_TRACE(FATAL, "21-40 Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005910 pdev->stats.tx_comp_histogram.pkts_21_40);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005911 DP_TRACE(FATAL, "41-60 Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005912 pdev->stats.tx_comp_histogram.pkts_41_60);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005913 DP_TRACE(FATAL, "61-80 Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005914 pdev->stats.tx_comp_histogram.pkts_61_80);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005915 DP_TRACE(FATAL, "81-100 Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005916 pdev->stats.tx_comp_histogram.pkts_81_100);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005917 DP_TRACE(FATAL, "101-200 Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005918 pdev->stats.tx_comp_histogram.pkts_101_200);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005919 DP_TRACE(FATAL, " 201+ Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005920 pdev->stats.tx_comp_histogram.pkts_201_plus);
5921
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005922 DP_TRACE(FATAL, "Rx path statistics");
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005923
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305924 DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005925 pdev->stats.rx.to_stack.num,
5926 pdev->stats.rx.to_stack.bytes);
Ishank Jain57c42a12017-04-12 10:42:22 +05305927 for (i = 0; i < CDP_MAX_RX_RINGS; i++)
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305928 DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),",
Ishank Jain57c42a12017-04-12 10:42:22 +05305929 i, pdev->stats.rx.rcvd_reo[i].num,
5930 pdev->stats.rx.rcvd_reo[i].bytes);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305931 DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),",
Ishank Jain57c42a12017-04-12 10:42:22 +05305932 pdev->stats.rx.intra_bss.pkts.num,
5933 pdev->stats.rx.intra_bss.pkts.bytes);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305934 DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),",
Yun Park92af7132017-09-13 16:33:35 -07005935 pdev->stats.rx.intra_bss.fail.num,
5936 pdev->stats.rx.intra_bss.fail.bytes);
Pamidipati, Vijay8e798652018-01-03 15:32:12 +05305937 DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005938 pdev->stats.rx.raw.num,
5939 pdev->stats.rx.raw.bytes);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005940 DP_TRACE(FATAL, "dropped: error %u msdus",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005941 pdev->stats.rx.err.mic_err);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005942 DP_TRACE(FATAL, "peer invalid %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005943 pdev->soc->stats.rx.err.rx_invalid_peer.num);
5944
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005945 DP_TRACE(FATAL, "Reo Statistics");
5946 DP_TRACE(FATAL, "rbm error: %u msdus",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005947 pdev->soc->stats.rx.err.invalid_rbm);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005948 DP_TRACE(FATAL, "hal ring access fail: %u msdus",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005949 pdev->soc->stats.rx.err.hal_ring_access_fail);
5950
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005951 DP_TRACE(FATAL, "Reo errors");
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005952
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305953 for (error_code = 0; error_code < HAL_REO_ERR_MAX;
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005954 error_code++) {
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005955 DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005956 error_code,
5957 pdev->soc->stats.rx.err.reo_error[error_code]);
5958 }
5959
Pamidipati, Vijay623fbee2017-07-07 10:58:15 +05305960 for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005961 error_code++) {
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005962 DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005963 error_code,
5964 pdev->soc->stats.rx.err
5965 .rxdma_error[error_code]);
5966 }
5967
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005968 DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
5969 DP_TRACE(FATAL, "Single Packet: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005970 pdev->stats.rx_ind_histogram.pkts_1);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005971 DP_TRACE(FATAL, "2-20 Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005972 pdev->stats.rx_ind_histogram.pkts_2_20);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005973 DP_TRACE(FATAL, "21-40 Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005974 pdev->stats.rx_ind_histogram.pkts_21_40);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005975 DP_TRACE(FATAL, "41-60 Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005976 pdev->stats.rx_ind_histogram.pkts_41_60);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005977 DP_TRACE(FATAL, "61-80 Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005978 pdev->stats.rx_ind_histogram.pkts_61_80);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005979 DP_TRACE(FATAL, "81-100 Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005980 pdev->stats.rx_ind_histogram.pkts_81_100);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005981 DP_TRACE(FATAL, "101-200 Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005982 pdev->stats.rx_ind_histogram.pkts_101_200);
Adil Saeed Musthafa03e2ac22017-05-19 16:55:23 -07005983 DP_TRACE(FATAL, " 201+ Packets: %u",
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005984 pdev->stats.rx_ind_histogram.pkts_201_plus);
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07005985
5986 DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u",
5987 __func__,
5988 pdev->soc->wlan_cfg_ctx->tso_enabled,
5989 pdev->soc->wlan_cfg_ctx->lro_enabled,
5990 pdev->soc->wlan_cfg_ctx->rx_hash,
5991 pdev->soc->wlan_cfg_ctx->napi_enabled);
5992#ifdef QCA_LL_TX_FLOW_CONTROL_V2
5993 DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u",
5994 __func__,
5995 pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold,
5996 pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset);
5997#endif
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08005998 }
5999}
6000
6001/*
6002 * dp_txrx_dump_stats() - Dump statistics
6003 * @value - Statistics option
6004 */
Mohit Khanna90d7ebd2017-09-12 21:54:21 -07006005static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
6006 enum qdf_stats_verbosity_level level)
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08006007{
6008 struct dp_soc *soc =
6009 (struct dp_soc *)psoc;
6010 QDF_STATUS status = QDF_STATUS_SUCCESS;
6011
6012 if (!soc) {
6013 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6014 "%s: soc is NULL", __func__);
6015 return QDF_STATUS_E_INVAL;
6016 }
6017
6018 switch (value) {
6019 case CDP_TXRX_PATH_STATS:
6020 dp_txrx_path_stats(soc);
6021 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006022
6023 case CDP_RX_RING_STATS:
6024 dp_print_per_ring_stats(soc);
6025 break;
6026
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08006027 case CDP_TXRX_TSO_STATS:
6028 /* TODO: NOT IMPLEMENTED */
6029 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006030
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08006031 case CDP_DUMP_TX_FLOW_POOL_INFO:
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07006032 cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08006033 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006034
psimha61b1a362017-07-27 15:45:49 -07006035 case CDP_DP_NAPI_STATS:
6036 dp_print_napi_stats(soc);
6037 break;
6038
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08006039 case CDP_TXRX_DESC_STATS:
6040 /* TODO: NOT IMPLEMENTED */
6041 break;
Venkata Sharath Chandra Manchala918aefe2017-04-10 10:21:56 -07006042
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08006043 default:
6044 status = QDF_STATUS_E_INVAL;
6045 break;
6046 }
6047
6048 return status;
6049
6050}
6051
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07006052#ifdef QCA_LL_TX_FLOW_CONTROL_V2
6053/**
6054 * dp_update_flow_control_parameters() - API to store datapath
6055 * config parameters
6056 * @soc: soc handle
6057 * @cfg: ini parameter handle
6058 *
6059 * Return: void
6060 */
6061static inline
6062void dp_update_flow_control_parameters(struct dp_soc *soc,
6063 struct cdp_config_params *params)
6064{
6065 soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
6066 params->tx_flow_stop_queue_threshold;
6067 soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
6068 params->tx_flow_start_queue_offset;
6069}
6070#else
6071static inline
6072void dp_update_flow_control_parameters(struct dp_soc *soc,
6073 struct cdp_config_params *params)
6074{
6075}
6076#endif
6077
6078/**
6079 * dp_update_config_parameters() - API to store datapath
6080 * config parameters
6081 * @soc: soc handle
6082 * @cfg: ini parameter handle
6083 *
6084 * Return: status
6085 */
6086static
6087QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
6088 struct cdp_config_params *params)
6089{
6090 struct dp_soc *soc = (struct dp_soc *)psoc;
6091
6092 if (!(soc)) {
6093 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6094 "%s: Invalid handle", __func__);
6095 return QDF_STATUS_E_INVAL;
6096 }
6097
6098 soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
6099 soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
6100 soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
6101 soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
6102 params->tcp_udp_checksumoffload;
6103 soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
6104
6105 dp_update_flow_control_parameters(soc, params);
6106
6107 return QDF_STATUS_SUCCESS;
6108}
6109
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05306110/**
6111 * dp_txrx_set_wds_rx_policy() - API to store datapath
6112 * config parameters
6113 * @vdev_handle - datapath vdev handle
6114 * @cfg: ini parameter handle
6115 *
6116 * Return: status
6117 */
6118#ifdef WDS_VENDOR_EXTENSION
6119void
6120dp_txrx_set_wds_rx_policy(
6121 struct cdp_vdev *vdev_handle,
6122 u_int32_t val)
6123{
6124 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6125 struct dp_peer *peer;
6126 if (vdev->opmode == wlan_op_mode_ap) {
6127 /* for ap, set it on bss_peer */
6128 TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
6129 if (peer->bss_peer) {
6130 peer->wds_ecm.wds_rx_filter = 1;
6131 peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6132 peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6133 break;
6134 }
6135 }
6136 } else if (vdev->opmode == wlan_op_mode_sta) {
6137 peer = TAILQ_FIRST(&vdev->peer_list);
6138 peer->wds_ecm.wds_rx_filter = 1;
6139 peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0;
6140 peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0;
6141 }
6142}
6143
6144/**
6145 * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
6146 *
6147 * @peer_handle - datapath peer handle
6148 * @wds_tx_ucast: policy for unicast transmission
6149 * @wds_tx_mcast: policy for multicast transmission
6150 *
6151 * Return: void
6152 */
6153void
6154dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
6155 int wds_tx_ucast, int wds_tx_mcast)
6156{
6157 struct dp_peer *peer = (struct dp_peer *)peer_handle;
6158 if (wds_tx_ucast || wds_tx_mcast) {
6159 peer->wds_enabled = 1;
6160 peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
6161 peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
6162 } else {
6163 peer->wds_enabled = 0;
6164 peer->wds_ecm.wds_tx_ucast_4addr = 0;
6165 peer->wds_ecm.wds_tx_mcast_4addr = 0;
6166 }
6167
6168 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6169 FL("Policy Update set to :\
6170 peer->wds_enabled %d\
6171 peer->wds_ecm.wds_tx_ucast_4addr %d\
6172 peer->wds_ecm.wds_tx_mcast_4addr %d\n"),
6173 peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
6174 peer->wds_ecm.wds_tx_mcast_4addr);
6175 return;
6176}
6177#endif
6178
Karunakar Dasinenica792542017-01-16 10:08:58 -08006179static struct cdp_wds_ops dp_ops_wds = {
6180 .vdev_set_wds = dp_vdev_set_wds,
Tallapragada Kalyan2a5fc622017-12-08 21:07:43 +05306181#ifdef WDS_VENDOR_EXTENSION
6182 .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
6183 .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
6184#endif
Karunakar Dasinenica792542017-01-16 10:08:58 -08006185};
6186
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05306187/*
6188 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
6189 * @soc - datapath soc handle
6190 * @peer - datapath peer handle
6191 *
6192 * Delete the AST entries belonging to a peer
6193 */
6194#ifdef FEATURE_WDS
6195static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
6196 struct dp_peer *peer)
6197{
Tallapragada Kalyan1c14d5d2017-07-26 00:31:35 +05306198 struct dp_ast_entry *ast_entry, *temp_ast_entry;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05306199
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05306200 qdf_spin_lock_bh(&soc->ast_lock);
6201 DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05306202 dp_peer_del_ast(soc, ast_entry);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05306203
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05306204 qdf_spin_unlock_bh(&soc->ast_lock);
6205}
6206#else
6207static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
6208 struct dp_peer *peer)
6209{
6210}
6211#endif
6212
Kabilan Kannan60e3b302017-09-07 20:06:17 -07006213/*
6214 * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
6215 * @vdev_handle - datapath vdev handle
6216 * @callback - callback function
6217 * @ctxt: callback context
6218 *
6219 */
6220static void
6221dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
6222 ol_txrx_data_tx_cb callback, void *ctxt)
6223{
6224 struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6225
6226 vdev->tx_non_std_data_callback.func = callback;
6227 vdev->tx_non_std_data_callback.ctxt = ctxt;
6228}
6229
Santosh Anbu2280e862018-01-03 22:25:53 +05306230/**
6231 * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
6232 * @pdev_hdl: datapath pdev handle
6233 *
6234 * Return: opaque pointer to dp txrx handle
6235 */
6236static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
6237{
6238 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6239
6240 return pdev->dp_txrx_handle;
6241}
6242
6243/**
6244 * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
6245 * @pdev_hdl: datapath pdev handle
6246 * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
6247 *
6248 * Return: void
6249 */
6250static void
6251dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
6252{
6253 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
6254
6255 pdev->dp_txrx_handle = dp_txrx_hdl;
6256}
6257
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +05306258/**
6259 * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
6260 * @soc_handle: datapath soc handle
6261 *
6262 * Return: opaque pointer to external dp (non-core DP)
6263 */
6264static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
6265{
6266 struct dp_soc *soc = (struct dp_soc *)soc_handle;
6267
6268 return soc->external_txrx_handle;
6269}
6270
6271/**
6272 * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
6273 * @soc_handle: datapath soc handle
6274 * @txrx_handle: opaque pointer to external dp (non-core DP)
6275 *
6276 * Return: void
6277 */
6278static void
6279dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
6280{
6281 struct dp_soc *soc = (struct dp_soc *)soc_handle;
6282
6283 soc->external_txrx_handle = txrx_handle;
6284}
6285
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05306286#ifdef CONFIG_WIN
6287static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
6288{
6289 struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
6290 struct dp_peer *peer = (struct dp_peer *) peer_hdl;
6291 struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
6292
Karunakar Dasineni372647d2018-01-15 22:27:39 -08006293 peer->delete_in_progress = true;
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05306294 dp_peer_delete_ast_entries(soc, peer);
6295}
6296#endif
6297
Leo Chang5ea93a42016-11-03 12:39:49 -07006298static struct cdp_cmn_ops dp_ops_cmn = {
6299 .txrx_soc_attach_target = dp_soc_attach_target_wifi3,
6300 .txrx_vdev_attach = dp_vdev_attach_wifi3,
6301 .txrx_vdev_detach = dp_vdev_detach_wifi3,
6302 .txrx_pdev_attach = dp_pdev_attach_wifi3,
6303 .txrx_pdev_detach = dp_pdev_detach_wifi3,
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08006304 .txrx_peer_create = dp_peer_create_wifi3,
6305 .txrx_peer_setup = dp_peer_setup_wifi3,
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05306306#ifdef CONFIG_WIN
6307 .txrx_peer_teardown = dp_peer_teardown_wifi3,
6308#else
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08006309 .txrx_peer_teardown = NULL,
Pamidipati, Vijayb8bbf162017-06-26 23:47:39 +05306310#endif
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +05306311 .txrx_peer_add_ast = dp_peer_add_ast_wifi3,
6312 .txrx_peer_del_ast = dp_peer_del_ast_wifi3,
6313 .txrx_peer_update_ast = dp_peer_update_ast_wifi3,
6314 .txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3,
6315 .txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3,
6316 .txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3,
6317 .txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3,
Dhanashri Atre6d90ef32016-11-10 16:27:38 -08006318 .txrx_peer_delete = dp_peer_delete_wifi3,
Leo Chang5ea93a42016-11-03 12:39:49 -07006319 .txrx_vdev_register = dp_vdev_register_wifi3,
6320 .txrx_soc_detach = dp_soc_detach_wifi3,
6321 .txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
6322 .txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
6323 .txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
Karunakar Dasinenied1de122016-08-02 11:57:59 -07006324 .addba_requestprocess = dp_addba_requestprocess_wifi3,
6325 .addba_responsesetup = dp_addba_responsesetup_wifi3,
6326 .delba_process = dp_delba_process_wifi3,
Gyanranjan Hazarika99a58d32017-12-22 21:56:17 -08006327 .set_addba_response = dp_set_addba_response,
Ishank Jain1e7401c2017-02-17 15:38:39 +05306328 .get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
Manikandan Mohane2fa8b72017-03-22 11:18:26 -07006329 .flush_cache_rx_queue = NULL,
Ishank Jain949674c2017-02-27 17:09:29 +05306330 /* TODO: get API's for dscp-tid need to be added*/
6331 .set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
6332 .set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
Venkata Sharath Chandra Manchalaa77da0d2017-02-27 22:44:37 -08006333 .txrx_stats = dp_txrx_stats,
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +05306334 .txrx_stats_request = dp_txrx_stats_request,
Kai Chen6eca1a62017-01-12 10:17:53 -08006335 .txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08006336 .display_stats = dp_txrx_dump_stats,
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05306337 .txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
6338 .txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
psimhac983d7e2017-07-26 15:20:07 -07006339#ifdef DP_INTR_POLL_BASED
6340 .txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
6341#else
Venkateswara Swamy Bandarua95b3242017-05-19 20:20:30 +05306342 .txrx_intr_attach = dp_soc_interrupt_attach,
psimhac983d7e2017-07-26 15:20:07 -07006343#endif
Venkateswara Swamy Bandarua95b3242017-05-19 20:20:30 +05306344 .txrx_intr_detach = dp_soc_interrupt_detach,
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +05306345 .set_pn_check = dp_set_pn_check_wifi3,
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -07006346 .update_config_parameters = dp_update_config_parameters,
Leo Chang5ea93a42016-11-03 12:39:49 -07006347 /* TODO: Add other functions */
Santosh Anbu2280e862018-01-03 22:25:53 +05306348 .txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
6349 .get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
6350 .set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +05306351 .get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
6352 .set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
6353 .tx_send = dp_tx_send,
Leo Chang5ea93a42016-11-03 12:39:49 -07006354};
6355
6356static struct cdp_ctrl_ops dp_ops_ctrl = {
6357 .txrx_peer_authorize = dp_peer_authorize,
Stephan Raj Ignatious Durairajba291292018-01-04 13:36:34 +05306358#ifdef QCA_SUPPORT_SON
6359 .txrx_set_inact_params = dp_set_inact_params,
6360 .txrx_start_inact_timer = dp_start_inact_timer,
6361 .txrx_set_overload = dp_set_overload,
6362 .txrx_peer_is_inact = dp_peer_is_inact,
6363 .txrx_mark_peer_inact = dp_mark_peer_inact,
6364#endif
Kalyan Tallapragada277f45e2017-01-30 14:25:27 +05306365 .txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
6366 .txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05306367#ifdef MESH_MODE_SUPPORT
6368 .txrx_set_mesh_mode = dp_peer_set_mesh_mode,
Venkateswara Swamy Bandaruec4f8e62017-03-07 11:04:28 +05306369 .txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
Venkateswara Swamy Bandaru3f623702017-02-25 00:12:59 +05306370#endif
Ishank Jain9f174c62017-03-30 18:37:42 +05306371 .txrx_set_vdev_param = dp_set_vdev_param,
6372 .txrx_peer_set_nawds = dp_peer_set_nawds,
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +05306373 .txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
6374 .txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
Pratik Gandhi8b8334b2017-03-09 17:41:40 +05306375 .txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
6376 .txrx_update_filter_neighbour_peers =
6377 dp_update_filter_neighbour_peers,
Chaitanya Kiran Godavarthi6228e3b2017-06-15 14:28:19 +05306378 .txrx_get_sec_type = dp_get_sec_type,
Leo Chang5ea93a42016-11-03 12:39:49 -07006379 /* TODO: Add other functions */
Keyur Parekhfad6d082017-05-07 08:54:47 -07006380 .txrx_wdi_event_sub = dp_wdi_event_sub,
6381 .txrx_wdi_event_unsub = dp_wdi_event_unsub,
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07006382#ifdef WDI_EVENT_ENABLE
6383 .txrx_get_pldev = dp_get_pldev,
6384#endif
Soumya Bhatcfbb8952017-10-03 15:04:09 +05306385 .txrx_set_pdev_param = dp_set_pdev_param,
Leo Chang5ea93a42016-11-03 12:39:49 -07006386};
6387
6388static struct cdp_me_ops dp_ops_me = {
Ishank Jainc838b132017-02-17 11:08:18 +05306389#ifdef ATH_SUPPORT_IQUE
6390 .tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
6391 .tx_me_free_descriptor = dp_tx_me_free_descriptor,
6392 .tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
6393#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07006394};
6395
6396static struct cdp_mon_ops dp_ops_mon = {
Kai Chen6eca1a62017-01-12 10:17:53 -08006397 .txrx_monitor_set_filter_ucast_data = NULL,
6398 .txrx_monitor_set_filter_mcast_data = NULL,
6399 .txrx_monitor_set_filter_non_data = NULL,
nobeljc8eb4d62018-01-04 14:29:32 -08006400 .txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
6401 .txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
6402 .txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
sumedh baikady84613b02017-09-19 16:36:14 -07006403 .txrx_reset_monitor_mode = dp_reset_monitor_mode,
nobeljd124b742017-10-16 11:59:12 -07006404 /* Added support for HK advance filter */
6405 .txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
Leo Chang5ea93a42016-11-03 12:39:49 -07006406};
6407
6408static struct cdp_host_stats_ops dp_ops_host_stats = {
Ishank Jain6290a3c2017-03-21 10:49:39 +05306409 .txrx_per_peer_stats = dp_get_host_peer_stats,
6410 .get_fw_peer_stats = dp_get_fw_peer_stats,
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05306411 .get_htt_stats = dp_get_htt_stats,
Pamidipati, Vijaybe379452017-06-21 00:31:06 +05306412 .txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
6413 .txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
Prathyusha Guduri184b6402018-02-04 23:01:49 +05306414 .txrx_stats_publish = dp_txrx_stats_publish,
Leo Chang5ea93a42016-11-03 12:39:49 -07006415 /* TODO */
6416};
6417
Leo Chang5ea93a42016-11-03 12:39:49 -07006418static struct cdp_raw_ops dp_ops_raw = {
6419 /* TODO */
6420};
6421
6422#ifdef CONFIG_WIN
6423static struct cdp_pflow_ops dp_ops_pflow = {
6424 /* TODO */
6425};
6426#endif /* CONFIG_WIN */
6427
Yue Ma245b47b2017-02-21 16:35:31 -08006428#ifdef FEATURE_RUNTIME_PM
6429/**
6430 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
6431 * @opaque_pdev: DP pdev context
6432 *
6433 * DP is ready to runtime suspend if there are no pending TX packets.
6434 *
6435 * Return: QDF_STATUS
6436 */
6437static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
6438{
6439 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6440 struct dp_soc *soc = pdev->soc;
6441
6442 /* Call DP TX flow control API to check if there is any
6443 pending packets */
6444
6445 if (soc->intr_mode == DP_INTR_POLL)
6446 qdf_timer_stop(&soc->int_timer);
6447
6448 return QDF_STATUS_SUCCESS;
6449}
6450
6451/**
6452 * dp_runtime_resume() - ensure DP is ready to runtime resume
6453 * @opaque_pdev: DP pdev context
6454 *
6455 * Resume DP for runtime PM.
6456 *
6457 * Return: QDF_STATUS
6458 */
6459static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
6460{
6461 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6462 struct dp_soc *soc = pdev->soc;
6463 void *hal_srng;
6464 int i;
6465
6466 if (soc->intr_mode == DP_INTR_POLL)
6467 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
6468
6469 for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
6470 hal_srng = soc->tcl_data_ring[i].hal_srng;
6471 if (hal_srng) {
6472 /* We actually only need to acquire the lock */
6473 hal_srng_access_start(soc->hal_soc, hal_srng);
6474 /* Update SRC ring head pointer for HW to send
6475 all pending packets */
6476 hal_srng_access_end(soc->hal_soc, hal_srng);
6477 }
6478 }
6479
6480 return QDF_STATUS_SUCCESS;
6481}
6482#endif /* FEATURE_RUNTIME_PM */
6483
Dustin Brown4a3b96b2017-05-10 15:49:38 -07006484static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
6485{
6486 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6487 struct dp_soc *soc = pdev->soc;
6488
psimhac983d7e2017-07-26 15:20:07 -07006489 if (soc->intr_mode == DP_INTR_POLL)
6490 qdf_timer_stop(&soc->int_timer);
Dustin Brown4a3b96b2017-05-10 15:49:38 -07006491
6492 return QDF_STATUS_SUCCESS;
6493}
6494
6495static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
6496{
6497 struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
6498 struct dp_soc *soc = pdev->soc;
6499
psimhac983d7e2017-07-26 15:20:07 -07006500 if (soc->intr_mode == DP_INTR_POLL)
6501 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
Dustin Brown4a3b96b2017-05-10 15:49:38 -07006502
6503 return QDF_STATUS_SUCCESS;
6504}
Dustin Brown4a3b96b2017-05-10 15:49:38 -07006505
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05306506#ifndef CONFIG_WIN
Leo Chang5ea93a42016-11-03 12:39:49 -07006507static struct cdp_misc_ops dp_ops_misc = {
Kabilan Kannan60e3b302017-09-07 20:06:17 -07006508 .tx_non_std = dp_tx_non_std,
Leo Chang5ea93a42016-11-03 12:39:49 -07006509 .get_opmode = dp_get_opmode,
Dustin Brown4a3b96b2017-05-10 15:49:38 -07006510#ifdef FEATURE_RUNTIME_PM
Yue Ma245b47b2017-02-21 16:35:31 -08006511 .runtime_suspend = dp_runtime_suspend,
6512 .runtime_resume = dp_runtime_resume,
6513#endif /* FEATURE_RUNTIME_PM */
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07006514 .pkt_log_init = dp_pkt_log_init,
6515 .pkt_log_con_service = dp_pkt_log_con_service,
Leo Chang5ea93a42016-11-03 12:39:49 -07006516};
6517
6518static struct cdp_flowctl_ops dp_ops_flowctl = {
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07006519 /* WIFI 3.0 DP implement as required. */
6520#ifdef QCA_LL_TX_FLOW_CONTROL_V2
6521 .register_pause_cb = dp_txrx_register_pause_cb,
6522 .dump_flow_pool_info = dp_tx_dump_flow_pool_info,
6523#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
Leo Chang5ea93a42016-11-03 12:39:49 -07006524};
6525
6526static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
6527 /* WIFI 3.0 DP NOT IMPLEMENTED YET */
6528};
6529
Yun Parkfde6b9e2017-06-26 17:13:11 -07006530#ifdef IPA_OFFLOAD
Leo Chang5ea93a42016-11-03 12:39:49 -07006531static struct cdp_ipa_ops dp_ops_ipa = {
Yun Parkfde6b9e2017-06-26 17:13:11 -07006532 .ipa_get_resource = dp_ipa_get_resource,
6533 .ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
6534 .ipa_op_response = dp_ipa_op_response,
6535 .ipa_register_op_cb = dp_ipa_register_op_cb,
6536 .ipa_get_stat = dp_ipa_get_stat,
6537 .ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
6538 .ipa_enable_autonomy = dp_ipa_enable_autonomy,
6539 .ipa_disable_autonomy = dp_ipa_disable_autonomy,
6540 .ipa_setup = dp_ipa_setup,
6541 .ipa_cleanup = dp_ipa_cleanup,
6542 .ipa_setup_iface = dp_ipa_setup_iface,
6543 .ipa_cleanup_iface = dp_ipa_cleanup_iface,
6544 .ipa_enable_pipes = dp_ipa_enable_pipes,
6545 .ipa_disable_pipes = dp_ipa_disable_pipes,
6546 .ipa_set_perf_level = dp_ipa_set_perf_level
Leo Chang5ea93a42016-11-03 12:39:49 -07006547};
Yun Parkfde6b9e2017-06-26 17:13:11 -07006548#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07006549
Leo Chang5ea93a42016-11-03 12:39:49 -07006550static struct cdp_bus_ops dp_ops_bus = {
Dustin Brown4a3b96b2017-05-10 15:49:38 -07006551 .bus_suspend = dp_bus_suspend,
6552 .bus_resume = dp_bus_resume
Leo Chang5ea93a42016-11-03 12:39:49 -07006553};
6554
6555static struct cdp_ocb_ops dp_ops_ocb = {
6556 /* WIFI 3.0 DP NOT IMPLEMENTED YET */
6557};
6558
6559
6560static struct cdp_throttle_ops dp_ops_throttle = {
6561 /* WIFI 3.0 DP NOT IMPLEMENTED YET */
6562};
6563
6564static struct cdp_mob_stats_ops dp_ops_mob_stats = {
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -08006565 /* WIFI 3.0 DP NOT IMPLEMENTED YET */
Leo Chang5ea93a42016-11-03 12:39:49 -07006566};
6567
6568static struct cdp_cfg_ops dp_ops_cfg = {
6569 /* WIFI 3.0 DP NOT IMPLEMENTED YET */
6570};
6571
Mohit Khannaadfe9082017-11-17 13:11:17 -08006572/*
6573 * dp_wrapper_peer_get_ref_by_addr - wrapper function to get to peer
6574 * @dev: physical device instance
6575 * @peer_mac_addr: peer mac address
6576 * @local_id: local id for the peer
6577 * @debug_id: to track enum peer access
6578
6579 * Return: peer instance pointer
6580 */
6581static inline void *
6582dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr,
6583 u8 *local_id,
6584 enum peer_debug_id_type debug_id)
6585{
6586 /*
6587 * Currently this function does not implement the "get ref"
6588 * functionality and is mapped to dp_find_peer_by_addr which does not
6589 * increment the peer ref count. So the peer state is uncertain after
6590 * calling this API. The functionality needs to be implemented.
6591 * Accordingly the corresponding release_ref function is NULL.
6592 */
6593 return dp_find_peer_by_addr(dev, peer_mac_addr, local_id);
6594}
6595
Leo Chang5ea93a42016-11-03 12:39:49 -07006596static struct cdp_peer_ops dp_ops_peer = {
6597 .register_peer = dp_register_peer,
6598 .clear_peer = dp_clear_peer,
6599 .find_peer_by_addr = dp_find_peer_by_addr,
6600 .find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
Mohit Khannaadfe9082017-11-17 13:11:17 -08006601 .peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr,
6602 .peer_release_ref = NULL,
Leo Chang5ea93a42016-11-03 12:39:49 -07006603 .local_peer_id = dp_local_peer_id,
6604 .peer_find_by_local_id = dp_peer_find_by_local_id,
6605 .peer_state_update = dp_peer_state_update,
6606 .get_vdevid = dp_get_vdevid,
Yun Parkfde6b9e2017-06-26 17:13:11 -07006607 .get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
Leo Chang5ea93a42016-11-03 12:39:49 -07006608 .peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
6609 .get_vdev_for_peer = dp_get_vdev_for_peer,
6610 .get_peer_state = dp_get_peer_state,
Krishna Kumaar Natarajand684ba22017-01-25 15:48:43 -08006611 .last_assoc_received = dp_get_last_assoc_received,
6612 .last_disassoc_received = dp_get_last_disassoc_received,
6613 .last_deauth_received = dp_get_last_deauth_received,
Leo Chang5ea93a42016-11-03 12:39:49 -07006614};
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05306615#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07006616
6617static struct cdp_ops dp_txrx_ops = {
6618 .cmn_drv_ops = &dp_ops_cmn,
6619 .ctrl_ops = &dp_ops_ctrl,
6620 .me_ops = &dp_ops_me,
6621 .mon_ops = &dp_ops_mon,
6622 .host_stats_ops = &dp_ops_host_stats,
6623 .wds_ops = &dp_ops_wds,
6624 .raw_ops = &dp_ops_raw,
6625#ifdef CONFIG_WIN
6626 .pflow_ops = &dp_ops_pflow,
6627#endif /* CONFIG_WIN */
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05306628#ifndef CONFIG_WIN
Leo Chang5ea93a42016-11-03 12:39:49 -07006629 .misc_ops = &dp_ops_misc,
6630 .cfg_ops = &dp_ops_cfg,
6631 .flowctl_ops = &dp_ops_flowctl,
6632 .l_flowctl_ops = &dp_ops_l_flowctl,
Yun Parkfde6b9e2017-06-26 17:13:11 -07006633#ifdef IPA_OFFLOAD
Leo Chang5ea93a42016-11-03 12:39:49 -07006634 .ipa_ops = &dp_ops_ipa,
Yun Parkfde6b9e2017-06-26 17:13:11 -07006635#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07006636 .bus_ops = &dp_ops_bus,
6637 .ocb_ops = &dp_ops_ocb,
6638 .peer_ops = &dp_ops_peer,
6639 .throttle_ops = &dp_ops_throttle,
6640 .mob_stats_ops = &dp_ops_mob_stats,
Nandha Kishore Easwaranfd7832e2016-11-20 18:22:48 +05306641#endif
Leo Chang5ea93a42016-11-03 12:39:49 -07006642};
6643
6644/*
Aniruddha Paul0b1c4d22017-07-13 19:38:08 +05306645 * dp_soc_set_txrx_ring_map()
6646 * @dp_soc: DP handler for soc
6647 *
6648 * Return: Void
6649 */
6650static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
6651{
6652 uint32_t i;
6653 for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
6654 soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i];
6655 }
6656}
6657
/*
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload/control-plane callback table stored on the soc
 *
 * Allocates the dp_soc, links it to HIF/HTC/HAL, attaches HTT and the
 * soc-level wlan_cfg context, then initializes locks, the REO descriptor
 * freelist, the tx ring map, the HTT stats work queue and the
 * inactivity timer. Failures unwind via the labels at the bottom in
 * reverse order of acquisition.
 *
 * Return: DP SOC handle on success, NULL on failure
 */
/*
 * Local prototype added to temporarily address warning caused by
 * -Wmissing-prototypes. A more correct solution, namely to expose
 * a prototype in an appropriate header file, will come later.
 */
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	struct ol_if_ops *ol_ops);
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	struct ol_if_ops *ol_ops)
{
	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("DP SOC memory allocation failed"));
		goto fail0;
	}

	soc->cdp_soc.ops = &dp_txrx_ops;
	soc->cdp_soc.ol_ops = ol_ops;
	soc->ctrl_psoc = ctrl_psoc;
	soc->osdev = qdf_osdev;
	soc->hif_handle = hif_handle;

	soc->hal_soc = hif_get_hal_handle(hif_handle);
	soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
		soc->hal_soc, qdf_osdev);
	if (!soc->htt_handle) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HTT attach failed"));
		goto fail1;
	}

	soc->wlan_cfg_ctx = wlan_cfg_soc_attach();
	if (!soc->wlan_cfg_ctx) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("wlan_cfg_soc_attach failed"));
		goto fail2;
	}

	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx, rx_hash);
	soc->cce_disable = false;

	/* Let the control plane override max peer id and the CCE setting
	 * when it provides a config query hook.
	 * NOTE(review): ol_ops is dereferenced without a NULL check here —
	 * presumably every caller passes a valid ops table; confirm.
	 */
	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_MAX_PEER_ID);

		if (ret != -EINVAL) {
			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
		}

		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
				CDP_CFG_CCE_DISABLE);
		if (ret)
			soc->cce_disable = true;
	}

	qdf_spinlock_create(&soc->peer_ref_mutex);

	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	/* fill the tx/rx cpu ring map*/
	dp_soc_set_txrx_ring_map(soc);

	qdf_spinlock_create(&soc->htt_stats.lock);
	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	/*Initialize inactivity timer for wifison */
	dp_init_inact_timer(soc);

	return (void *)soc;

	/* error unwinding: reverse order of acquisition */
fail2:
	htt_soc_detach(soc->htt_handle);
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}
Keyur Parekhfad6d082017-05-07 08:54:47 -07006750
Manjunathappa Prakashe23acaf2017-11-10 00:17:24 -08006751/*
6752 * dp_get_pdev_for_mac_id() - Return pdev for mac_id
6753 *
6754 * @soc: handle to DP soc
6755 * @mac_id: MAC id
6756 *
6757 * Return: Return pdev corresponding to MAC
6758 */
6759void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
6760{
6761 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
6762 return soc->pdev_list[mac_id];
6763
6764 /* Typically for MCL as there only 1 PDEV*/
6765 return soc->pdev_list[0];
6766}
6767
/*
 * dp_get_ring_id_for_mac_id() -  Return ring id for mac_id
 *
 * @soc: handle to DP soc
 * @mac_id: MAC id
 *
 * Return: ring id (mac_id itself when a single pdev drives all MAC
 * rings, 0 when each pdev owns exactly one ring)
 */
int dp_get_ring_id_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
{
	/*
	 * Single pdev using both MACs will operate on both MAC rings,
	 * which is the case for MCL.
	 */
	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		return mac_id;

	/* For WIN each PDEV will operate one ring, so index is zero. */
	return 0;
}
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07006788
6789/*
6790 * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
6791 * @soc: DP SoC context
6792 * @max_mac_rings: No of MAC rings
6793 *
6794 * Return: None
6795 */
6796static
6797void dp_is_hw_dbs_enable(struct dp_soc *soc,
6798 int *max_mac_rings)
6799{
6800 bool dbs_enable = false;
6801 if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
6802 dbs_enable = soc->cdp_soc.ol_ops->
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05306803 is_hw_dbs_2x2_capable(soc->ctrl_psoc);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07006804
6805 *max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
6806}
6807
/*
 * dp_set_pktlog_wifi3() - subscribe/unsubscribe a pktlog WDI event
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not. (True or False)
 *
 * For RX_DESC/LITE_RX, programs the RXDMA monitor status ring TLV
 * filters on every MAC ring of this pdev and arms/stops the monitor
 * reap timer. For LITE_T2H, asks FW to start/stop ppdu stats messages.
 * Requests are no-ops while a monitor vdev is active.
 *
 * Return: 0 (including the monitor-mode no-op cases)
 */
#ifdef WDI_EVENT_ENABLE
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
	bool enable)
{
	struct dp_soc *soc = pdev->soc;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int max_mac_rings = wlan_cfg_get_num_mac_rings
					(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;

	/* clamps max_mac_rings to 1 when HW is not DBS 2x2 capable */
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			FL("Max_mac_rings %d \n"),
			max_mac_rings);

	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				/* full pktlog: all per-MPDU/MSDU/PPDU TLVs
				 * plus packet payload and attention TLV
				 */
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.msdu_start = 1;
				htt_tlv_filter.msdu_end = 1;
				htt_tlv_filter.mpdu_end = 1;
				htt_tlv_filter.packet_header = 1;
				htt_tlv_filter.attention = 1;
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
								mac_id++) {
					htt_h2t_rx_ring_cfg(soc->htt_handle,
					 pdev->pdev_id + mac_id,
					 pdev->rxdma_mon_status_ring
					 .hal_srng,
					 RXDMA_MONITOR_STATUS,
					 RX_BUFFER_SIZE,
					 &htt_tlv_filter);

				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
					DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}

			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

				/* lite pktlog: ppdu-level TLVs only, no
				 * payload, smaller status buffers
				 */
				htt_tlv_filter.ppdu_start = 1;
				htt_tlv_filter.ppdu_end = 1;
				htt_tlv_filter.ppdu_end_user_stats = 1;
				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
				htt_tlv_filter.ppdu_end_status_done = 1;
				htt_tlv_filter.mpdu_start = 1;
				htt_tlv_filter.enable_fp = 1;
				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;

				for (mac_id = 0; mac_id < max_mac_rings;
								mac_id++) {
					htt_h2t_rx_ring_cfg(soc->htt_handle,
						pdev->pdev_id + mac_id,
						pdev->rxdma_mon_status_ring
						.hal_srng,
						RXDMA_MONITOR_STATUS,
						RX_BUFFER_SIZE_PKTLOG_LITE,
						&htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_mod(&soc->mon_reap_timer,
					DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			/* To enable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
			 * passing value 0xffff. Once these macros will define
			 * in htt header file will use proper macros
			*/
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev, 0xffff,
						pdev->pdev_id + mac_id);
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;

				/* htt_tlv_filter is still all-zero here, so
				 * this config stops TLV delivery
				 */
				for (mac_id = 0; mac_id < max_mac_rings;
								mac_id++) {
					htt_h2t_rx_ring_cfg(soc->htt_handle,
						pdev->pdev_id + mac_id,
						pdev->rxdma_mon_status_ring
						.hal_srng,
						RXDMA_MONITOR_STATUS,
						RX_BUFFER_SIZE,
						&htt_tlv_filter);
				}

				if (soc->reap_timer_init)
					qdf_timer_stop(&soc->mon_reap_timer);
			}
			break;
		case WDI_EVENT_LITE_T2H:
			if (pdev->monitor_vdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				return 0;
			}
			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
			 * passing value 0. Once these macros will define in htt
			 * header file will use proper macros
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				pdev->pktlog_ppdu_stats = false;
				/* keep sniffer/mcopy/enhanced-stats ppdu
				 * subscriptions alive; send 0 only when no
				 * other consumer remains
				 */
				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
							pdev->pdev_id + mac_id);
				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
							pdev->pdev_id + mac_id);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
							pdev->pdev_id + mac_id);
				}
			}

			break;
		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}
	return 0;
}
#endif
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07007006
7007#ifdef CONFIG_MCL
7008/*
7009 * dp_service_mon_rings()- timer to reap monitor rings
7010 * reqd as we are not getting ppdu end interrupts
7011 * @arg: SoC Handle
7012 *
7013 * Return:
7014 *
7015 */
7016static void dp_service_mon_rings(void *arg)
7017{
7018 struct dp_soc *soc = (struct dp_soc *) arg;
7019 int ring = 0, work_done;
7020
7021 work_done = dp_mon_process(soc, ring, QCA_NAPI_BUDGET);
7022 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
7023 FL("Reaped %d descs from Monitor rings"), work_done);
7024
7025 qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
7026}
7027
7028#ifndef REMOVE_PKT_LOG
7029/**
7030 * dp_pkt_log_init() - API to initialize packet log
7031 * @ppdev: physical device handle
7032 * @scn: HIF context
7033 *
7034 * Return: none
7035 */
7036void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
7037{
7038 struct dp_pdev *handle = (struct dp_pdev *)ppdev;
7039
7040 if (handle->pkt_log_init) {
7041 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7042 "%s: Packet log not initialized", __func__);
7043 return;
7044 }
7045
7046 pktlog_sethandle(&handle->pl_dev, scn);
7047 pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION);
7048
7049 if (pktlogmod_init(scn)) {
7050 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7051 "%s: pktlogmod_init failed", __func__);
7052 handle->pkt_log_init = false;
7053 } else {
7054 handle->pkt_log_init = true;
7055 }
7056}
7057
/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Initializes pktlog for the pdev and attaches the pktlog HTC endpoint.
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
	/* The original cast ppdev to dp_pdev* and immediately cast it back
	 * to cdp_pdev* for the call; pass the handle straight through.
	 */
	dp_pkt_log_init(ppdev, scn);
	pktlog_htc_attach();
}
7072
7073/**
7074 * dp_pktlogmod_exit() - API to cleanup pktlog info
7075 * @handle: Pdev handle
7076 *
7077 * Return: none
7078 */
7079static void dp_pktlogmod_exit(struct dp_pdev *handle)
7080{
7081 void *scn = (void *)handle->soc->hif_handle;
7082
7083 if (!scn) {
7084 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7085 "%s: Invalid hif(scn) handle", __func__);
7086 return;
7087 }
7088
7089 pktlogmod_exit(scn);
7090 handle->pkt_log_init = false;
7091}
7092#endif
7093#else
/* Stub for non-CONFIG_MCL builds: no pktlog state to tear down */
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
7095#endif